From 68397dfbbc05f27b8174b6c73f787e9d729f36e7 Mon Sep 17 00:00:00 2001 From: VyacheslavSemin Date: Mon, 5 Aug 2024 08:29:11 +0000 Subject: [PATCH 01/26] Initial release --- .helmignore | 27 + CHANGELOG.md | 5 + Chart.yaml | 20 + README.md | 883 ++++++ sources/extraScrapeConfigs.yaml | 6 + sources/ingress_values.yaml | 19 + sources/litmus/deploy-Kubernetes-Docs.sh | 436 +++ sources/litmus/docs-node.yaml | 15 + sources/litmus/docs-status.sh | 15 + .../docs-chaos-container-kill.yaml | 41 + .../experiments/docs-chaos-pod-cpu-hog.yaml | 39 + .../experiments/docs-chaos-pod-delete.yaml | 36 + .../docs-chaos-pod-memory-hog.yaml | 40 + .../docs-chaos-pod-network-duplication.yaml | 38 + .../docs-chaos-pod-network-latency.yaml | 38 + .../docs-chaos-pod-network-loss.yaml | 38 + sources/litmus/rbac/container-kill-rbac.yaml | 57 + sources/litmus/rbac/pod-cpu-hog-rbac.yaml | 57 + sources/litmus/rbac/pod-delete-rbac.yaml | 54 + sources/litmus/rbac/pod-memory-hog-rbac.yaml | 57 + .../rbac/pod-network-duplication-rbac.yaml | 56 + sources/litmus/rbac/pod-network-latency.yaml | 57 + .../litmus/rbac/pod-network-loss-rbac.yaml | 56 + .../documentserver-statsd-exporter.json | 2797 +++++++++++++++++ .../metrics/kubernetes-cluster-resourses.json | 2118 +++++++++++++ sources/scc/docs-components.yaml | 21 + sources/scc/helm-components.yaml | 21 + sources/scripts/add_shardkey.py | 143 + sources/scripts/createdb.sql | 73 + sources/scripts/get_logs.sh | 4 + sources/scripts/remove_shardkey.py | 161 + sources/scripts/test_ds.py | 166 + sources/shutdown-ds.yaml | 39 + templates/NOTES.txt | 19 + templates/RBAC/dashboard-role.yaml | 22 + templates/RBAC/dashboard-rolebinding.yaml | 26 + templates/_helpers.tpl | 248 ++ templates/configmaps/add-shardkey.yaml | 14 + templates/configmaps/balancer-lua.yaml | 395 +++ templates/configmaps/balancer-snippet.yaml | 230 ++ templates/configmaps/config.yaml | 24 + templates/configmaps/createdb.yaml | 14 + templates/configmaps/dashboard.yaml | 37 + 
templates/configmaps/documentserver.yaml | 71 + templates/configmaps/example.yaml | 16 + templates/configmaps/grafana.yaml | 19 + templates/configmaps/remove-shardkey.yaml | 14 + templates/configmaps/welcome-page.yaml | 23 + templates/deployments/documentserver.yaml | 402 +++ templates/hpa/documentserver.yaml | 46 + templates/ingresses/documentserver.yaml | 48 + templates/ingresses/grafana.yaml | 40 + templates/jobs/dashboard.yaml | 86 + templates/pvc/ds-service-files.yaml | 23 + templates/secrets/grafana-datasource.yaml | 23 + templates/secrets/info-auth.yaml | 17 + templates/secrets/jwt.yaml | 38 + templates/secrets/license.yaml | 15 + templates/secrets/redis-password.yaml | 20 + templates/serviceaccount/dashboard.yaml | 19 + templates/serviceaccount/documentserver.yaml | 19 + templates/services/docservice.yaml | 24 + templates/services/documentserver.yaml | 36 + templates/services/example.yaml | 27 + templates/statefulset/example.yaml | 104 + templates/tests/test-ds-cm.yaml | 16 + templates/tests/test-ds-pod.yaml | 82 + values.yaml | 1205 +++++++ 68 files changed, 11095 insertions(+) create mode 100644 .helmignore create mode 100644 CHANGELOG.md create mode 100644 Chart.yaml create mode 100644 sources/extraScrapeConfigs.yaml create mode 100644 sources/ingress_values.yaml create mode 100755 sources/litmus/deploy-Kubernetes-Docs.sh create mode 100644 sources/litmus/docs-node.yaml create mode 100755 sources/litmus/docs-status.sh create mode 100644 sources/litmus/experiments/docs-chaos-container-kill.yaml create mode 100644 sources/litmus/experiments/docs-chaos-pod-cpu-hog.yaml create mode 100644 sources/litmus/experiments/docs-chaos-pod-delete.yaml create mode 100644 sources/litmus/experiments/docs-chaos-pod-memory-hog.yaml create mode 100644 sources/litmus/experiments/docs-chaos-pod-network-duplication.yaml create mode 100644 sources/litmus/experiments/docs-chaos-pod-network-latency.yaml create mode 100644 
sources/litmus/experiments/docs-chaos-pod-network-loss.yaml create mode 100644 sources/litmus/rbac/container-kill-rbac.yaml create mode 100644 sources/litmus/rbac/pod-cpu-hog-rbac.yaml create mode 100644 sources/litmus/rbac/pod-delete-rbac.yaml create mode 100644 sources/litmus/rbac/pod-memory-hog-rbac.yaml create mode 100644 sources/litmus/rbac/pod-network-duplication-rbac.yaml create mode 100644 sources/litmus/rbac/pod-network-latency.yaml create mode 100644 sources/litmus/rbac/pod-network-loss-rbac.yaml create mode 100644 sources/metrics/documentserver-statsd-exporter.json create mode 100644 sources/metrics/kubernetes-cluster-resourses.json create mode 100644 sources/scc/docs-components.yaml create mode 100644 sources/scc/helm-components.yaml create mode 100644 sources/scripts/add_shardkey.py create mode 100644 sources/scripts/createdb.sql create mode 100755 sources/scripts/get_logs.sh create mode 100644 sources/scripts/remove_shardkey.py create mode 100755 sources/scripts/test_ds.py create mode 100644 sources/shutdown-ds.yaml create mode 100644 templates/NOTES.txt create mode 100644 templates/RBAC/dashboard-role.yaml create mode 100644 templates/RBAC/dashboard-rolebinding.yaml create mode 100644 templates/_helpers.tpl create mode 100644 templates/configmaps/add-shardkey.yaml create mode 100644 templates/configmaps/balancer-lua.yaml create mode 100644 templates/configmaps/balancer-snippet.yaml create mode 100644 templates/configmaps/config.yaml create mode 100644 templates/configmaps/createdb.yaml create mode 100644 templates/configmaps/dashboard.yaml create mode 100644 templates/configmaps/documentserver.yaml create mode 100644 templates/configmaps/example.yaml create mode 100644 templates/configmaps/grafana.yaml create mode 100644 templates/configmaps/remove-shardkey.yaml create mode 100644 templates/configmaps/welcome-page.yaml create mode 100644 templates/deployments/documentserver.yaml create mode 100644 templates/hpa/documentserver.yaml create mode 100644 
templates/ingresses/documentserver.yaml create mode 100644 templates/ingresses/grafana.yaml create mode 100644 templates/jobs/dashboard.yaml create mode 100644 templates/pvc/ds-service-files.yaml create mode 100644 templates/secrets/grafana-datasource.yaml create mode 100644 templates/secrets/info-auth.yaml create mode 100644 templates/secrets/jwt.yaml create mode 100644 templates/secrets/license.yaml create mode 100644 templates/secrets/redis-password.yaml create mode 100644 templates/serviceaccount/dashboard.yaml create mode 100644 templates/serviceaccount/documentserver.yaml create mode 100644 templates/services/docservice.yaml create mode 100644 templates/services/documentserver.yaml create mode 100644 templates/services/example.yaml create mode 100644 templates/statefulset/example.yaml create mode 100644 templates/tests/test-ds-cm.yaml create mode 100644 templates/tests/test-ds-pod.yaml create mode 100644 values.yaml diff --git a/.helmignore b/.helmignore new file mode 100644 index 0000000..48ddc28 --- /dev/null +++ b/.helmignore @@ -0,0 +1,27 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.helmignore +.git/ +.github/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +# Test dirs +sources/litmus/ diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..8fc0566 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,5 @@ +# Changelog + +## 1.0.0 + +* Initial release diff --git a/Chart.yaml b/Chart.yaml new file mode 100644 index 0000000..bf69783 --- /dev/null +++ b/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v2 +name: docs-shards +description: Helm chart for installing ONLYOFFICE Docs Shards in Kubernetes + +type: application + +version: 1.0.0 + +appVersion: 8.1.1 + +dependencies: +- name: ingress-nginx + version: 4.9.0 + repository: https://kubernetes.github.io/ingress-nginx + condition: ingress-nginx.enabled + +- name: redis + version: 19.5.3 + repository: https://charts.bitnami.com/bitnami + condition: redis.enabled diff --git a/README.md b/README.md index aa1b079..d6b210e 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,885 @@ # Kubernetes-Docs-Shards ONLYOFFICE Docs for Kubernetes + +## Contents +- [Introduction](#introduction) +- [Deploy prerequisites](#deploy-prerequisites) + * [1. Add Helm repositories](#1-add-helm-repositories) + * [2. Install Persistent Storage](#2-install-persistent-storage) + * [3. Configure dependent charts](#3-configure-dependent-charts) + + [3.1 Configure redis/bitnami subchart](#31-configure-redisbitnami-subchart) + + [3.2 Configure ingress-nginx/kubernetes subchart](#32-configure-ingress-nginxkubernetes-subchart) + * [4. Deploy StatsD exporter](#4-deploy-statsd-exporter) + + [4.1 Add Helm repositories](#41-add-helm-repositories) + + [4.2 Installing Prometheus](#42-installing-prometheus) + + [4.3 Installing StatsD exporter](#43-installing-statsd-exporter) + * [5. 
Make changes to Node-config configuration files](#5-make-changes-to-Node-config-configuration-files) + + [5.1 Create a ConfigMap containing a json file](#51-create-a-configmap-containing-a-json-file) + + [5.2 Specify parameters when installing ONLYOFFICE Docs](#52-specify-parameters-when-installing-onlyoffice-docs) + * [6. Add custom Fonts](#6-add-custom-fonts) + * [7. Add Plugins](#7-add-plugins) + * [8. Change interface themes](#8-change-interface-themes) + + [8.1 Create a ConfigMap containing a json file](#81-create-a-configmap-containing-a-json-file) + + [8.2 Specify parameters when installing ONLYOFFICE Docs](#82-specify-parameters-when-installing-onlyoffice-docs) +- [Deploy ONLYOFFICE Docs](#deploy-onlyoffice-docs) + * [1. Deploy the ONLYOFFICE Docs license](#1-deploy-the-onlyoffice-docs-license) + + [1.1 Create secret](#11-create-secret) + + [1.2 Specify parameters when installing ONLYOFFICE Docs](#12-specify-parameters-when-installing-onlyoffice-docs) + * [2. Deploy ONLYOFFICE Docs](#2-deploy-onlyoffice-docs) + * [3. Uninstall ONLYOFFICE Docs](#3-uninstall-onlyoffice-docs) + * [4. Parameters](#4-parameters) + * [5. Configuration and installation details](#5-configuration-and-installation-details) + * [5.1 Example deployment (optional)](#51-example-deployment-optional) + * [5.2 Metrics deployment (optional)](#52-metrics-deployment-optional) + * [5.3 Expose ONLYOFFICE Docs via HTTPS](#53-expose-onlyoffice-docs-via-https) + * [6. Scale ONLYOFFICE Docs (optional)](#6-scale-onlyoffice-docs-optional) + + [6.1 Horizontal Pod Autoscaling](#61-horizontal-pod-autoscaling) + + [6.2 Manual scaling](#62-manual-scaling) + * [7. Update ONLYOFFICE Docs license (optional)](#7-update-onlyoffice-docs-license-optional) + * [8. ONLYOFFICE Docs installation test (optional)](#8-onlyoffice-docs-installation-test-optional) + * [9. Access to the info page (optional)](#9-access-to-the-info-page-optional) + * [10. 
Deploy ONLYOFFICE Docs with your own dependency (optional)](#10-deploy-onlyoffice-docs-with-your-own-dependency-optional) + * [10.1 Use your own Redis](#101-use-your-own-redis) + + [10.1.1 Connect ONLYOFFICE Docs to Redis using password](#1011-connect-to-redis-using-password) + + [10.1.2 Connect ONLYOFFICE Docs to Redis using existing secret](#1012-alternative-connect-to-redis-using-existing-secret) + * [10.2 Use your own nginx-ingress controller](#102-use-your-own-nginx-ingress-controller) +- [Using Grafana to visualize metrics (optional)](#using-grafana-to-visualize-metrics-optional) + * [1. Deploy Grafana](#1-deploy-grafana) + + [1.1 Deploy Grafana without installing ready-made dashboards](#11-deploy-grafana-without-installing-ready-made-dashboards) + + [1.2 Deploy Grafana with the installation of ready-made dashboards](#12-deploy-grafana-with-the-installation-of-ready-made-dashboards) + * [2 Access to Grafana via Ingress](#2-access-to-grafana-via-ingress) + * [3. View gathered metrics in Grafana](#3-view-gathered-metrics-in-grafana) + +## Introduction + +- You must have a Kubernetes cluster installed. Please, checkout [the reference](https://kubernetes.io/docs/setup/) to set up Kubernetes. +- You should also have a local configured copy of `kubectl`. See [this](https://kubernetes.io/docs/tasks/tools/install-kubectl/) guide how to install and configure `kubectl`. +- You should install Helm v3.7+. Please follow the instruction [here](https://helm.sh/docs/intro/install/) to install it. + +## Deploy prerequisites + +### 1. Add Helm repositories + +```bash +$ helm repo add nfs-server-provisioner https://kubernetes-sigs.github.io/nfs-ganesha-server-and-external-provisioner +$ helm repo add onlyoffice https://download.onlyoffice.com/charts/stable +$ helm repo update +``` + +### 2. 
Install Persistent Storage + +Install NFS Server Provisioner + +Note: Persistent storage will be used for forgotten and error files + +Note: When installing NFS Server Provisioner, Storage Classes - `NFS` is created. + +```bash +$ helm install nfs-server nfs-server-provisioner/nfs-server-provisioner \ + --set persistence.enabled=true \ + --set persistence.storageClass=PERSISTENT_STORAGE_CLASS \ + --set persistence.size=PERSISTENT_SIZE +``` + +- `PERSISTENT_STORAGE_CLASS` is a Persistent Storage Class available in your Kubernetes cluster. + + Persistent Storage Classes for different providers: + - Amazon EKS: `gp2` + - Digital Ocean: `do-block-storage` + - IBM Cloud: Default `ibmc-file-bronze`. [More storage classes](https://cloud.ibm.com/docs/containers?topic=containers-file_storage) + - Yandex Cloud: `yc-network-hdd` or `yc-network-ssd`. [More details](https://cloud.yandex.ru/docs/managed-kubernetes/operations/volumes/manage-storage-class) + - minikube: `standard` + - k3s: `local-path` + +- `PERSISTENT_SIZE` is the total size of all Persistent Storages for the nfs Persistent Storage Class. You can express the size as a plain integer with one of these suffixes: `T`, `G`, `M`, `Ti`, `Gi`, `Mi`. For example: `9Gi`. + +See more details about installing NFS Server Provisioner via Helm [here](https://github.com/kubernetes-sigs/nfs-ganesha-server-and-external-provisioner/tree/master/charts/nfs-server-provisioner). + +Configure a Persistent Volume Claim + +*The PersistentVolume type to be used for PVC placement must support Access Mode [ReadWriteMany](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes).* +*Also, PersistentVolume must have as the owner the user from whom the ONLYOFFICE Docs will be started. By default it is `ds` (101:101).* + +Note: If you want to enable `WOPI`, please set the parameter `wopi.enabled=true`.
In this case Persistent Storage must be connected to the cluster nodes with the disabled caching attributes for the mounted directory for the clients. For NFS Server Provisioner it can be achieved by adding `noac` option to the parameter `storageClass.mountOptions`. Please find more information [here](https://github.com/kubernetes-sigs/nfs-ganesha-server-and-external-provisioner/blob/master/charts/nfs-server-provisioner/values.yaml#L83). + +### 3. Configure dependent charts + +ONLYOFFICE Docs use redis by bitnami and ingress-nginx by kubernetes as dependencies charts. This bundle ingress-nginx+redis is used to implement balancing in sharded mode. You can manage the configuration of dependent charts, or disable them to use your dependencies. + +If you want to manage the configuration of dependent charts, please check section [#3.1](#31-configure-redisbitnami-subchart) for Redis and [#3.2](#32-configure-ingress-nginxkubernetes-subchart) for ingress-nginx controller + +(Optional) Also, you can use your own Redis or ingress-nginx controller, for more information please refer to step [#10](#10-deploy-onlyoffice-docs-with-your-own-dependency-optional) + +#### 3.1 Configure redis/bitnami subchart + +Redis/bitnami subchart is **enabled by default** + +Note: Set the `redis.metrics.enabled=true` to enable exposing Redis metrics to be gathered by Prometheus. 
+ +Some overridden values for the Redis/Bitnami subchart can be found in the table below: + +### Redis subchart parameters + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `redis.master.persistence.size` | Persistent Volume size | `8Gi` | +| `redis.enabled` | Defines whether to enable or disable the Redis/Bitnami subchart during deployment | `true` | +| `redis.architecture` | Redis® architecture. Allowed values: standalone or replication | `standalone` | +| `redis.secretAnnotations` | Annotations to add to secret. Some service annotations added for correct deployment along with the ONLYOFFICE Docs chart | `helm.sh/hook: pre-install helm.sh/hook-weight: "1"` | +| `redis.master.persistence.storageClass` | Persistent Volume storage class | `""` | +| `redis.metrics.enabled` | Start a sidecar prometheus exporter to expose Redis® metrics | `false` | +| `redis.auth.password` | Redis® password | `""` | + +See more details about installing Redis via Helm [here](https://github.com/bitnami/charts/tree/main/bitnami/redis). + +#### 3.2 Configure ingress-nginx/kubernetes subchart + +ingress-nginx/kubernetes subchart is **enabled by default** + +Docs works in high scalability mode (more than 1 shard) only with the ingress-nginx controller by kubernetes enabled.
+ +### Ingress-nginx subchart parameters + +Some overridden values for the ingress-nginx/Kubernetes subchart can be found in the table below: + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `ingress-nginx.enabled` | Defines whether to enable or disable the ingress-nginx subchart during deployment | `true` | +| `ingress-nginx.controller.replicaCount` | Number of deployed controller replicas | `2` | +| `ingress-nginx.namespaceOverride` | Override the ingress-nginx deployment namespace | `default` | +| `ingress-nginx.controller.allowSnippetAnnotations` | This configuration defines if Ingress Controller should allow users to set their own *-snippet annotations, otherwise this is forbidden / dropped when users add those annotations. Global snippets in ConfigMap are still respected | `true` | +| `ingress-nginx.service.annotations` | Annotations to be added to the external controller service. See controller.service.internal.annotations for annotations to be added to the internal controller service. | `{}` | +| `ingress-nginx.controller.extraVolumeMounts` | Additional volumeMounts to the controller main container. Note: These parameters are used to add configuration to allow custom balancing. For more information please check values.yaml | `[]` | +| `ingress-nginx.controller.extraVolumes` | Additional volumes to the controller pod. Note: These parameters are used to add configuration to allow custom balancing. For more information please check values.yaml | `[]` | + +See more details about installing ingress-nginx via Helm [here](https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx). + +### 4.
Deploy StatsD exporter + +*This step is optional. You can skip step [#4](#4-deploy-statsd-exporter) entirely if you don't want to run StatsD exporter* + +#### 4.1 Add Helm repositories + +```bash +$ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +$ helm repo add kube-state-metrics https://kubernetes.github.io/kube-state-metrics +$ helm repo update +``` + +#### 4.2 Installing Prometheus + +To install Prometheus to your cluster, run the following command: + +```bash +$ helm install prometheus -f https://raw.githubusercontent.com/ONLYOFFICE/Kubernetes-Docs/master/sources/extraScrapeConfigs.yaml prometheus-community/prometheus \ + --set server.global.scrape_interval=1m +``` + +To change the scrape interval, specify the `server.global.scrape_interval` parameter. + +See more details about installing Prometheus via Helm [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus). + +#### 4.3 Installing StatsD exporter + +To install StatsD exporter to your cluster, run the following command: + +``` +$ helm install statsd-exporter prometheus-community/prometheus-statsd-exporter \ + --set statsd.udpPort=8125 \ + --set statsd.tcpPort=8126 \ + --set statsd.eventFlushInterval=30000ms +``` + +See more details about installing Prometheus StatsD exporter via Helm [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-statsd-exporter). + +To allow the StatsD metrics in ONLYOFFICE Docs, follow step [5.2](#52-metrics-deployment-optional) + +### 5. Make changes to Node-config configuration files + +*This step is optional. 
You can skip step [#5](#5-make-changes-to-node-config-configuration-files) entirely if you don't need to make changes to the configuration files* + +#### 5.1 Create a ConfigMap containing a json file + +In order to create a ConfigMap from a file that contains the `production-linux-local.json` structure, you need to run the following command: + +```bash +$ kubectl create configmap custom-local-config \ + --from-file=./production-linux-local.json +``` + +Note: Any name except `local-config` can be used instead of `custom-local-config`. + +#### 5.2 Specify parameters when installing ONLYOFFICE Docs + +When installing ONLYOFFICE Docs, specify the `extraConf.configMap=custom-local-config` and `extraConf.filename=production-linux-local.json` parameters + +Note: If you need to add a configuration file after the ONLYOFFICE Docs is already installed, you need to execute step [5.1](#51-create-a-configmap-containing-a-json-file) +and then run the `helm upgrade documentserver onlyoffice/docs-shards --set extraConf.configMap=custom-local-config --set extraConf.filename=production-linux-local.json` command or +`helm upgrade documentserver -f ./values.yaml onlyoffice/docs-shards` if the parameters are specified in the `values.yaml` file. + +### 6. Add custom Fonts + +*This step is optional. You can skip step [#6](#6-add-custom-fonts) entirely if you don't need to add your fonts* + +In order to add fonts to images, you need to rebuild the images. Refer to the relevant steps in [this](https://github.com/ONLYOFFICE/Docker-Docs#building-onlyoffice-docs) manual. +Then specify your images when installing the ONLYOFFICE Docs. + +### 7. Add Plugins + +*This step is optional. You can skip step [#7](#7-add-plugins) entirely if you don't need to add plugins* + +In order to add plugins to images, you need to rebuild the images. Refer to the relevant steps in [this](https://github.com/ONLYOFFICE/Docker-Docs#building-onlyoffice-docs) manual. 
+Then specify your images when installing the ONLYOFFICE Docs. + +### 8. Change interface themes + +*This step is optional. You can skip step [#8](#8-change-interface-themes) entirely if you don't need to change the interface themes* + +#### 8.1 Create a ConfigMap containing a json file + +To create a ConfigMap with a json file that contains the interface themes, you need to run the following command: + +```bash +$ kubectl create configmap custom-themes \ + --from-file=./custom-themes.json +``` + +Note: Instead of `custom-themes` and `custom-themes.json` you can use any other names. + +#### 8.2 Specify parameters when installing ONLYOFFICE Docs + +When installing ONLYOFFICE Docs, specify the `extraThemes.configMap=custom-themes` and `extraThemes.filename=custom-themes.json` parameters. + +Note: If you need to add interface themes after the ONLYOFFICE Docs is already installed, you need to execute step [8.1](#81-create-a-configmap-containing-a-json-file) +and then run the `helm upgrade documentserver onlyoffice/docs-shards --set extraThemes.configMap=custom-themes --set extraThemes.filename=custom-themes.json` command or +`helm upgrade documentserver -f ./values.yaml onlyoffice/docs-shards` if the parameters are specified in the `values.yaml` file. + +## Deploy ONLYOFFICE Docs + +### 1. Deploy the ONLYOFFICE Docs license + +#### 1.1. Create secret + +If you have a valid ONLYOFFICE Docs license, create a secret `license` from the file: + +``` +$ kubectl create secret generic license --from-file=path/to/license.lic +``` + +Note: The source license file name should be 'license.lic' because this name would be used as a field in the created secret. + +#### 1.2. Specify parameters when installing ONLYOFFICE Docs + +When installing ONLYOFFICE Docs, specify the `license.existingSecret=license` parameter.
+ +``` +$ helm install documentserver onlyoffice/docs-shards --set license.existingSecret=license +``` + +Note: If you need to add license after the ONLYOFFICE Docs is already installed, you need to execute step [1.1](#11-create-secret) and then run the `helm upgrade documentserver onlyoffice/docs-shards --set license.existingSecret=license` command or `helm upgrade documentserver -f ./values.yaml onlyoffice/docs-shards` if the parameters are specified in the `values.yaml` file. + +### 2. Deploy ONLYOFFICE Docs + +To deploy ONLYOFFICE Docs with the release name `documentserver`: + +```bash +$ helm install documentserver onlyoffice/docs-shards --set redis.master.persistence.storageClass=PERSISTENT_STORAGE_CLASS +``` + +The command deploys ONLYOFFICE Docs on the Kubernetes cluster in the default configuration. The [Parameters](#4-parameters) section lists the parameters that can be configured during installation. + +### 3. Uninstall ONLYOFFICE Docs + +To uninstall/delete the `documentserver` deployment: + +```bash +$ helm delete documentserver +``` + +The `helm delete` command removes all the Kubernetes components associated with the chart and deletes the release. + +### 4. Parameters + +### Common parameters + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `connections.redisConnectorName` | Defines which connector to use to connect to Redis. 
If you need to connect to Redis Sentinel, set the value `ioredis` | `redis` | +| `connections.redistHost` | The IP address or the name of the Redis host | `redis-master` | +| `connections.redisPort` | The Redis server port number | `6379` | +| `connections.redisUser` | The Redis [user](https://redis.io/docs/management/security/acl/) name. The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file | `default` | +| `connections.redisDBNum` | Number of the redis logical database to be [selected](https://redis.io/commands/select/). The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file | `0` | +| `connections.redisClusterNodes` | List of nodes in the Redis cluster. There is no need to specify every node in the cluster, 3 should be enough. You can specify multiple values. It must be specified in the `host:port` format | `[]` | +| `connections.redisSentinelGroupName` | Name of a group of Redis instances composed of a master and one or more slaves. Used if `connections.redisConnectorName` is set to `ioredis` | `mymaster` | +| `connections.redisPassword` | The password set for the Redis account. If set to, it takes priority over the `connections.redisExistingSecret` and `redis.auth.password`. The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file| `""` | +| `connections.redisSecretKeyName` | The name of the key that contains the Redis user password | `redis-password` | +| `connections.redisExistingSecret` | Name of existing secret to use for Redis passwords. Must contain the key specified in `connections.redisSecretKeyName`. The password from this secret overrides password set in the `options` object in `local.json` | `redis` | +| `connections.redisNoPass` | Defines whether to use a Redis auth without a password. 
If the connection to Redis server does not require a password, set the value to `true` | `false` | +| `webProxy.enabled` | Specify whether a Web proxy is used in your network to access the Pods of k8s cluster to the Internet | `false` | +| `webProxy.http` | Web Proxy address for `HTTP` traffic | `http://proxy.example.com` | +| `webProxy.https` | Web Proxy address for `HTTPS` traffic | `https://proxy.example.com` | +| `webProxy.noProxy` | Patterns for IP addresses or k8s services name or domain names that shouldn’t use the Web Proxy | `localhost,127.0.0.1,docservice` | +| `privateCluster` | Specify whether the k8s cluster is used in a private network without internet access | `false` | +| `namespaceOverride` | The name of the namespace in which ONLYOFFICE Docs will be deployed. If not set, the name will be taken from `.Release.Namespace` | `""` | +| `commonLabels` | Defines labels that will be additionally added to all the deployed resources. You can also use `tpl` as the value for the key | `{}` | +| `commonAnnotations` | Defines annotations that will be additionally added to all the deployed resources. You can also use `tpl` as the value for the key. Some resources may override the values specified here with their own | `{}` | +| `serviceAccount.create` | Enable ServiceAccount creation | `false` | +| `serviceAccount.name` | Name of the ServiceAccount to be used. If not set and `serviceAccount.create` is `true` the name will be taken from `.Release.Name` or `serviceAccount.create` is `false` the name will be "default" | `""` | +| `serviceAccount.annotations` | Map of annotations to add to the ServiceAccount. If set to, it takes priority over the `commonAnnotations` | `{}` | +| `serviceAccount.automountServiceAccountToken` | Enable auto mount of ServiceAccountToken on the serviceAccount created. Used only if `serviceAccount.create` is `true` | `true` | +| `persistence.existingClaim` | Name of an existing PVC to use. 
If not specified, a PVC named "ds-files" will be created | `""` | +| `persistence.annotations` | Defines annotations that will be additionally added to "ds-files" PVC. If set to, it takes priority over the `commonAnnotations` | `{}` | +| `persistence.storageClass` | PVC Storage Class for ONLYOFFICE Docs data volume | `nfs` | +| `persistence.size` | PVC Storage Request for ONLYOFFICE Docs volume | `8Gi` | +| `podSecurityContext.enabled` | Enable security context for the pods. If set to true, `podSecurityContext` is enabled for all resources describing the podTemplate. | `false` | +| `podSecurityContext.fsGroup` | Defines the Group ID to which the owner and permissions for all files in volumes are changed when mounted in the Pod | `101` | +| `podAntiAffinity.type` | Types of Pod antiaffinity. Allowed values: `soft` or `hard` | `soft` | +| `podAntiAffinity.topologyKey` | Node label key to match | `kubernetes.io/hostname` | +| `podAntiAffinity.weight` | Priority when selecting node. It is in the range from 1 to 100 | `100` | +| `nodeSelector` | Node labels for pods assignment. Each ONLYOFFICE Docs services can override the values specified here with its own | `{}` | +| `tolerations` | Tolerations for pods assignment. Each ONLYOFFICE Docs services can override the values specified here with its own | `[]` | +| `imagePullSecrets` | Container image registry secret name | `""` | +| `service.existing` | The name of an existing service for ONLYOFFICE Docs. If not specified, a service named `documentserver` will be created | `""` | +| `service.annotations` | Map of annotations to add to the ONLYOFFICE Docs service. If set to, it takes priority over the `commonAnnotations` | `{}` | +| `service.type` | ONLYOFFICE Docs service type | `ClusterIP` | +| `service.port` | ONLYOFFICE Docs service port | `8888` | +| `service.sessionAffinity` | [Session Affinity](https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity) for ONLYOFFICE Docs service. 
If not set, `None` will be set as the default value | `""` | +| `service.sessionAffinityConfig` | [Configuration](https://kubernetes.io/docs/reference/networking/virtual-ips/#session-stickiness-timeout) for ONLYOFFICE Docs service Session Affinity. Used if the `service.sessionAffinity` is set | `{}` | +| `license.existingSecret` | Name of the existing secret that contains the license. Must contain the key `license.lic` | `""` | +| `license.existingClaim` | Name of the existing PVC in which the license is stored. Must contain the file `license.lic` | `""` | +| `log.level` | Defines the type and severity of a logged event. Possible values are `ALL`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `FATAL`, `MARK`, `OFF` | `WARN` | +| `log.type` | Defines the format of a logged event. Possible values are `pattern`, `json`, `basic`, `coloured`, `messagePassThrough`, `dummy` | `pattern` | +| `log.pattern` | Defines the log [pattern](https://github.com/log4js-node/log4js-node/blob/master/docs/layouts.md#pattern-format) if `log.type=pattern` | `[%d] [%p] %c - %.10000m` | +| `wopi.enabled` | Defines if `WOPI` is enabled. If the parameter is enabled, then caching attributes for the mounted directory (`PVC`) should be disabled for the client | `false` | +| `metrics.enabled` | Specifies the enabling StatsD for ONLYOFFICE Docs | `false` | +| `metrics.host` | Defines StatsD listening host | `statsd-exporter-prometheus-statsd-exporter` | +| `metrics.port` | Defines StatsD listening port | `8125` | +| `metrics.prefix` | Defines StatsD metrics prefix for backend services | `ds.` | +| `jwt.enabled` | Specifies the enabling the JSON Web Token validation by the ONLYOFFICE Docs. Common for inbox and outbox requests | `true` | +| `jwt.secret` | Defines the secret key to validate the JSON Web Token in the request to the ONLYOFFICE Docs. Common for inbox and outbox requests | `MYSECRET` | +| `jwt.header` | Defines the http header that will be used to send the JSON Web Token. 
Common for inbox and outbox requests | `Authorization` | +| `jwt.inBody` | Specifies the enabling the token validation in the request body to the ONLYOFFICE Docs | `false` | +| `jwt.inbox` | JSON Web Token validation parameters for inbox requests only. If not specified, the values of the parameters of the common `jwt` are used | `{}` | +| `jwt.outbox` | JSON Web Token validation parameters for outbox requests only. If not specified, the values of the parameters of the common `jwt` are used | `{}` | +| `jwt.existingSecret` | The name of an existing secret containing variables for jwt. If not specified, a secret named `jwt` will be created | `""` | +| `extraConf.configMap` | The name of the ConfigMap containing the json file that override the default values | `""` | +| `extraConf.filename` | The name of the json file that contains custom values. Must be the same as the `key` name in `extraConf.ConfigMap` | `production-linux-local.json` | +| `extraThemes.configMap` | The name of the ConfigMap containing the json file that contains the interface themes | `""` | +| `extraThemes.filename` | The name of the json file that contains custom interface themes. Must be the same as the `key` name in `extraThemes.configMap` | `custom-themes.json` | +| `sqlScripts.branchName` | The name of the repository branch from which sql scripts will be downloaded | `master` | +| `requestFilteringAgent.allowPrivateIPAddress` | Defines if it is allowed to connect private IP address or not. `requestFilteringAgent` parameters are used if JWT is disabled: `jwt.enabled=false` | `false` | +| `requestFilteringAgent.allowMetaIPAddress` | Defines if it is allowed to connect meta address or not | `false` | +| `requestFilteringAgent.allowIPAddressList` | Defines the list of IP addresses allowed to connect. 
These values are preferred over `requestFilteringAgent.denyIPAddressList` | `[]` | +| `requestFilteringAgent.denyIPAddressList` | Defines the list of IP addresses denied to connect | `[]` | +| `documentserver.terminationGracePeriodSeconds` | The time to terminate gracefully during which the Pod will have the Terminating status | `60` | +| `documentserver.keysRedisDBNum` | The number of the database for storing the balancing results | `1` | +| `documentserver.KeysExpireTime` | The time in seconds after which the key will be deleted from the balancing database. By default it is 172800, meaning 48 hours | `172800` | +| `documentserver.ingressCustomConfigMapsNamespace` | Defines where custom controller configmaps will be deployed | `default` | +| `documentserver.annotations` | Defines annotations that will be additionally added to Documentserver Deployment | `{}` | +| `documentserver.podAnnotations` | Map of annotations to add to the Documentserver deployment pods | `rollme: "{{ randAlphaNum 5 | quote }}"` | +| `documentserver.replicas` | Number of Documentserver replicas to deploy. If the `documentserver.autoscaling.enabled` parameter is enabled, it is ignored. 
| `3` | +| `documentserver.updateStrategy.type` | Documentserver deployment update strategy type | `RollingUpdate` | +| `documentserver.customPodAntiAffinity` | Prohibiting the scheduling of Documentserver Pods relative to other Pods containing the specified labels on the same node | `{}` | +| `documentserver.podAffinity` | Pod affinity rules for Documentserver Pods scheduling by nodes relative to other Pods | `{}` | +| `documentserver.nodeAffinity` | Node affinity rules for Documentserver Pods scheduling by nodes | `{}` | +| `documentserver.nodeSelector` | Node labels for Documentserver Pods assignment | `{}` | +| `documentserver.tolerations` | Tolerations for Documentserver Pods assignment | `{}` | +| `documentserver.autoscaling.enabled` | Enable Documentserver deployment autoscaling | `false` | +| `documentserver.autoscaling.annotations` | Defines annotations that will be additionally added to Documentserver deployment HPA | `{}` | +| `documentserver.autoscaling.minReplicas` | Documentserver deployment autoscaling minimum number of replicas | `2` | +| `documentserver.autoscaling.maxReplicas` | Documentserver deployment autoscaling maximum number of replicas | `4` | +| `documentserver.autoscaling.targetCPU.enabled` | Enable autoscaling of Documentserver deployment by CPU usage percentage | `true` | +| `documentserver.autoscaling.targetCPU.utilizationPercentage`| Documentserver deployment autoscaling target CPU percentage | `70` | +| `documentserver.autoscaling.targetMemory.enabled` | Enable autoscaling of Documentserver deployment by memory usage percentage | `false` | +| `documentserver.autoscaling.targetMemory.utilizationPercentage` | Documentserver deployment autoscaling target memory percentage | `70` | +| `documentserver.autoscaling.customMetricsType` | Custom, additional or external autoscaling metrics for the documentserver deployment | `[]` | +| `documentserver.autoscaling.behavior` | Configuring Documentserver deployment scaling behavior policies for the 
`scaleDown` and `scaleUp` fields | `{}` | +| `documentserver.initContainers.image.repository` | Documentserver add-shardkey initContainer image repository | `onlyoffice/docs-utils` | +| `documentserver.initContainers.image.tag` | Documentserver add-shardkey initContainer image tag | `8.1.1-2` | +| `documentserver.initContainers.image.pullPolicy` | Documentserver add-shardkey initContainer image pull policy | `IfNotPresent` | +| `documentserver.initContainers.containerSecurityContext.enabled` | Configure a Security Context for Documentserver add-shardkey initContainer container in Pod | `false` | +| `documentserver.initContainers.resources.requests` | The requested resources for the Documentserver add-shardkey initContainer | `{}` | +| `documentserver.initContainers.resources.limits` | The resources limits for the Documentserver add-shardkey initContainer | `{}` | +| `documentserver.initContainers.custom` | Custom Documentserver initContainers parameters | `[]` | + +### documentserver.docservice parameters + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `documentserver.docservice.image.repository` | Docservice container image repository* | `onlyoffice/docs-docservice-de` | +| `documentserver.docservice.image.tag` | Docservice container image tag | `8.1.1-2` | +| `documentserver.docservice.image.pullPolicy` | Docservice container image pull policy | `IfNotPresent` | +| `documentserver.docservice.containerSecurityContext.enabled`| Enable security context for the Docservice container | `false` | +| `documentserver.docservice.containerPorts.http` | Define docservice container port | `8000` | +| `documentserver.docservice.readinessProbe.enabled` | 
Enable readinessProbe for Docservice container | `true` | +| `documentserver.docservice.livenessProbe.enabled` | Enable livenessProbe for Docservice container | `true` | +| `documentserver.docservice.startupProbe.enabled` | Enable startupProbe for Docservice container | `true` | +| `documentserver.docservice.resources.requests` | The requested resources for the Docservice container | `{}` | +| `documentserver.docservice.resources.limits` | The resources limits for the Docservice container | `{}` | + + +### documentserver.proxy parameters + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `documentserver.proxy.accessLog` | Defines the nginx config [access_log](https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log) format directive | `main` | +| `documentserver.proxy.gzipProxied` | Defines the nginx config [gzip_proxied](https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_proxied) directive | `off` | +| `documentserver.proxy.clientMaxBodySize` | Defines the nginx config [client_max_body_size](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size) directive | `100m` | +| `documentserver.proxy.workerConnections` | Defines the nginx config [worker_connections](https://nginx.org/en/docs/ngx_core_module.html#worker_connections) directive | `4096` | +| `documentserver.proxy.workerProcesses` | Defines the nginx config worker_processes directive | `1` | +| `documentserver.proxy.secureLinkSecret` | Defines secret for the nginx config directive [secure_link_md5](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link_md5) | `verysecretstring` | +| `documentserver.proxy.infoAllowedIP` | 
Defines ip addresses for accessing the info page | `[]` | +| `documentserver.proxy.infoAllowedUser` | Defines user name for accessing the info page. If not set to, Nginx [Basic Authentication](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) will not be applied to access the info page. For more details, see [here](#12-access-to-the-info-page-optional) | `""` | +| `documentserver.proxy.infoAllowedPassword` | Defines user password for accessing the info page. Used if `proxy.infoAllowedUser` is set | `password` | +| `documentserver.proxy.infoAllowedSecretKeyName` | The name of the key that contains the info auth user password. Used if `proxy.infoAllowedUser` is set | `info-auth-password` | +| `documentserver.proxy.infoAllowedExistingSecret` | Name of existing secret to use for info auth password. Used if `proxy.infoAllowedUser` is set. Must contain the key specified in `proxy.infoAllowedSecretKeyName`. If set to, it takes priority over the `proxy.infoAllowedPassword` | `""` | +| `documentserver.proxy.welcomePage.enabled` | Defines whether the welcome page will be displayed | `true` | +| `documentserver.proxy.image.repository` | Docservice Proxy container image repository* | `onlyoffice/docs-proxy-de` | +| `documentserver.proxy.image.tag` | Docservice Proxy container image tag | `8.1.1-2` | +| `documentserver.proxy.image.pullPolicy` | Docservice Proxy container image pull policy | `IfNotPresent` | +| `documentserver.proxy.containerSecurityContext.enabled` | Enable security context for the Proxy container | `false` | +| `documentserver.proxy.containerPorts.http` | proxy container port | `8888` | +| `documentserver.proxy.resources.requests` | The requested resources for the Proxy container | `{}` | +| `documentserver.proxy.resources.limits` | The resources limits for the Proxy container | `{}` | +| `documentserver.proxy.readinessProbe.enabled` | Enable readinessProbe for Proxy container | `true` | +| `documentserver.proxy.livenessProbe.enabled` | Enable 
livenessProbe for Proxy container | `true` | +| `documentserver.proxy.startupProbe.enabled` | Enable startupProbe for Proxy container | `true` | + +### documentserver.converter parameters + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `documentserver.converter.count` | The mumber of Converter containers in the Documentserver Pod | `3` | +| `documentserver.converter.image.repository` | Converter container image repository* | `onlyoffice/docs-converter-de` | +| `documentserver.converter.image.tag` | Converter container image tag | `8.1.1-2` | +| `documentserver.converter.image.pullPolicy` | Converter container image pull policy | `IfNotPresent` | +| `documentserver.converter.containerSecurityContext.enabled` | Enable security context for the Converter container | `false` | +| `documentserver.converter.resources.requests` | The requested resources for the Converter container | `{}` | +| `documentserver.converter.resources.limits` | The resources limits for the Converter container | `{}` | + +### documentserver.postgresql parameters + +List of parameters for customizing the database inside the documentserver pod + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `documentserver.postgresql.image.repository` | Postgresql container image repository | `postgres` | +| `documentserver.postgresql.image.tag` | Postgresql container 
image tag | `16` | +| `documentserver.postgresql.image.pullPolicy` | Postgresql container image pull policy | `IfNotPresent` | +| `documentserver.postgresql.containerSecurityContext.enabled`| Enable security context for the Postgresql container | `false` | +| `documentserver.postgresql.containerPorts.tcp` | Postgresql container port | `5432` | +| `documentserver.postgresql.resources.requests` | The requested resources for the Postgresql container | `{}` | +| `documentserver.postgresql.resources.limits` | The resources limits for the Postgresql container | `{}` | + +### documentserver.rabbitmq parameters + +List of parameters for broker inside the documentserver pod + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `documentserver.rabbitmq.image.repository` | Rabbitmq container image repository | `rabbitmq` | +| `documentserver.rabbitmq.image.tag` | Rabbitmq container image tag | `3.12.10` | +| `documentserver.rabbitmq.image.pullPolicy` | Rabbitmq container image pull policy | `ifNotPresent` | +| `documentserver.rabbitmq.containerSecurityContext.enabled` | Enable security context for the Rabbitmq container | `false` | +| `documentserver.rabbitmq.containerPorts.amqp` | Rabbitmq container port | `5672` | +| `documentserver.rabbitmq.resources.requests` | The requested resources for the Rabbitmq container | `{}` | +| `documentserver.rabbitmq.resources.limits` | The resources limits for the Rabbitmq container | `{}` | + +### Example parameters + +| Parameter | Description | Default | 
+|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `example.enabled` | Enables the installation of Example | `false` | +| `example.annotations` | Defines annotations that will be additionally added to Example StatefulSet. If set to, it takes priority over the `commonAnnotations` | `{}` | +| `example.podAnnotations` | Map of annotations to add to the example pod | `rollme: "{{ randAlphaNum 5 \| quote }}"` | +| `example.updateStrategy.type` | Example StatefulSet update strategy type | `RollingUpdate` | +| `example.customPodAntiAffinity` | Prohibiting the scheduling of Example Pod relative to other Pods containing the specified labels on the same node | `{}` | +| `example.podAffinity` | Defines [Pod affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity) rules for Example Pod scheduling by nodes relative to other Pods | `{}` | +| `example.nodeAffinity` | Defines [Node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity) rules for Example Pod scheduling by nodes | `{}` | +| `example.nodeSelector` | Node labels for Example Pods assignment. If set to, it takes priority over the `nodeSelector` | `{}` | +| `example.tolerations` | Tolerations for Example Pods assignment. 
If set to, it takes priority over the `tolerations` | `[]` | +| `example.image.repository` | Example container image name | `onlyoffice/docs-example` | +| `example.image.tag` | Example container image tag | `8.1.1-2` | +| `example.image.pullPolicy` | Example container image pull policy | `IfNotPresent` | +| `example.containerSecurityContext.enabled` | Enable security context for the Example container | `false` | +| `example.dsUrl` | ONLYOFFICE Docs external address. It should be changed only if it is necessary to check the operation of the conversion in Example (e.g. http://\/) | `/` | +| `example.resources.requests` | The requested resources for the Example container | `{}` | +| `example.resources.limits` | The resources limits for the Example container | `{}` | +| `example.extraConf.configMap` | The name of the ConfigMap containing the json file that override the default values. See an example of creation [here](https://github.com/ONLYOFFICE/Kubernetes-Docs?tab=readme-ov-file#71-create-a-configmap-containing-a-json-file) | `""` | +| `example.extraConf.filename` | The name of the json file that contains custom values. Must be the same as the `key` name in `example.extraConf.ConfigMap` | `local.json` | + +### Ingress parameters + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `ingress.enabled` | Enable the creation of an ingress for the ONLYOFFICE Docs | `true` | +| `ingress.annotations` | Map of annotations to add to the Ingress. 
If set to, it takes priority over the `commonAnnotations` | `nginx.ingress.kubernetes.io/proxy-body-size: 100m` | +| `ingress.ingressClassName` | Used to reference the IngressClass that should be used to implement this Ingress | `nginx` | +| `ingress.host` | Ingress hostname for the ONLYOFFICE Docs ingress | `""` | +| `ingress.ssl.enabled` | Enable ssl for the ONLYOFFICE Docs ingress | `false` | +| `ingress.ssl.secret` | Secret name for ssl to mount into the Ingress | `tls` | +| `ingress.path` | Specifies the path where ONLYOFFICE Docs will be available | `/` | + +### Grafana parameters + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `grafana.enabled` | Enable the installation of resources required for the visualization of metrics in Grafana | `false` | +| `grafana.namespace` | The name of the namespace in which RBAC components and Grafana resources will be deployed. If not set, the name will be taken from `namespaceOverride` if set, or .Release.Namespace | `""` | +| `grafana.ingress.enabled` | Enable the creation of an ingress for the Grafana. Used if you set `grafana.enabled` to `true` and want to use Nginx Ingress to access Grafana | `false` | +| `grafana.ingress.annotations` | Map of annotations to add to Grafana Ingress. If set to, it takes priority over the `commonAnnotations` | `nginx.ingress.kubernetes.io/proxy-body-size: 100m` | +| `grafana.dashboard.enabled` | Enable the installation of ready-made Grafana dashboards. Used if you set `grafana.enabled` to `true` | `false` | +| `grafanaDashboard.job.annotations` | Defines annotations that will be additionally added to Grafana Dashboard Job. 
If set to, it takes priority over the `commonAnnotations` | `{}` | +| `grafanaDashboard.job.podAnnotations` | Map of annotations to add to the Grafana Dashboard Pod | `{}` | +| `grafanaDashboard.job.customPodAntiAffinity` | Prohibiting the scheduling of Grafana Dashboard Job Pod relative to other Pods containing the specified labels on the same node | `{}` | +| `grafanaDashboard.job.podAffinity` | Defines [Pod affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity) rules for Grafana Dashboard Job Pod scheduling by nodes relative to other Pods | `{}` | +| `grafanaDashboard.job.nodeAffinity` | Defines [Node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity) rules for Grafana Dashboard Job Pod scheduling by nodes | `{}` | +| `grafanaDashboard.job.nodeSelector` | Node labels for Grafana Dashboard Job Pod assignment. If set to, it takes priority over the `nodeSelector` | `{}` | +| `grafanaDashboard.job.tolerations` | Tolerations for Grafana Dashboard Job Pod assignment. 
If set to, it takes priority over the `tolerations` | `[]` | +| `grafanaDashboard.job.image.repository` | Job by Grafana Dashboard ONLYOFFICE Docs image repository | `onlyoffice/docs-utils` | +| `grafanaDashboard.job.image.tag` | Job by Grafana Dashboard ONLYOFFICE Docs image tag | `8.1.1-2` | +| `grafanaDashboard.job.image.pullPolicy` | Job by Grafana Dashboard ONLYOFFICE Docs image pull policy | `IfNotPresent` | +| `grafanaDashboard.job.containerSecurityContext.enabled` | Enable security context for the Grafana Dashboard container | `false` | +| `grafanaDashboard.job.resources.requests` | The requested resources for the job Grafana Dashboard container | `{}` | +| `grafanaDashboard.job.resources.limits` | The resources limits for the job Grafana Dashboard container | `{}` | + +### Testing parameters + +| Parameter | Description | Default | +|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| +| `tests.enabled` | Enable the resources creation necessary for ONLYOFFICE Docs launch testing and connected dependencies availability testing. These resources will be used when running the `helm test` command | `true` | +| `tests.annotations` | Defines annotations that will be additionally added to Test Pod. 
If set to, it takes priority over the `commonAnnotations` | `{}` | +| `tests.customPodAntiAffinity` | Prohibiting the scheduling of Test Pod relative to other Pods containing the specified labels on the same node | `{}` | +| `tests.podAffinity` | Defines [Pod affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity) rules for Test Pod scheduling by nodes relative to other Pods | `{}` | +| `tests.nodeAffinity` | Defines [Node affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity) rules for Test Pod scheduling by nodes | `{}` | +| `tests.nodeSelector` | Node labels for Test Pod assignment. If set to, it takes priority over the `nodeSelector` | `{}` | +| `tests.tolerations` | Tolerations for Test Pod assignment. If set to, it takes priority over the `tolerations` | `[]` | +| `tests.image.repository` | Test container image name | `onlyoffice/docs-utils` | +| `tests.image.tag` | Test container image tag | `8.1.1-2` | +| `tests.image.pullPolicy` | Test container image pull policy | `IfNotPresent` | +| `tests.containerSecurityContext.enabled` | Enable security context for the Test container | `false` | +| `tests.resources.requests` | The requested resources for the test container | `{}` | +| `tests.resources.limits` | The resources limits for the test container | `{}` | + +* *Note: The prefix `-de` is specified in the value of the image repository, which means solution type. Possible options: + - Nothing is specified. For the open-source community version + - `-de`. For commercial Developer Edition + - `-ee`. For commercial Enterprise Edition + + If you use the community version, there may be problems with co-editing documents. + + The default value of this parameter refers to the ONLYOFFICE Document Server Developer Edition. 
To learn more about this edition and compare it with other editions, please see the comparison table on [this page](https://github.com/ONLYOFFICE/DocumentServer#onlyoffice-docs-editions). + +Specify each parameter using the `--set key=value[,key=value]` argument to helm install. For example, + +```bash +$ helm install documentserver onlyoffice/docs-shards --set ingress.enabled=true,ingress.ssl.enabled=true,ingress.host=example.com +``` + +This command exposes ONLYOFFICE Docs via HTTPS. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```bash +$ helm install documentserver -f values.yaml onlyoffice/docs-shards +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +### 5. Configuration and installation details + +### 5.1 Example deployment (optional) + +To deploy the example, set the `example.enabled` parameter to true: + +```bash +$ helm install documentserver onlyoffice/docs-shards --set example.enabled=true +``` + +### 5.2 Metrics deployment (optional) +To deploy metrics, set `metrics.enabled` to true: + +```bash +$ helm install documentserver onlyoffice/docs-shards --set metrics.enabled=true +``` + +If you want to use Grafana to visualize metrics, set `grafana.enabled` to `true`. If you want to use Nginx Ingress to access Grafana, set `grafana.ingress.enabled` to `true`: + +```bash +$ helm install documentserver onlyoffice/docs-shards --set grafana.enabled=true --set grafana.ingress.enabled=true +``` + +### 5.3 Expose ONLYOFFICE Docs via HTTPS + +This type of exposure allows you to enable internal TLS termination for ONLYOFFICE Docs. + +Create the `tls` secret with an ssl certificate inside. 
+ +Put the ssl certificate and the private key into the `tls.crt` and `tls.key` files and then run: + +```bash +$ kubectl create secret generic tls \ + --from-file=./tls.crt \ + --from-file=./tls.key +``` + +```bash +$ helm install documentserver onlyoffice/docs-shards --set ingress.enabled=true,ingress.ssl.enabled=true,ingress.host=example.com + +``` + +Run the following command to get the `documentserver` ingress IP: + +```bash +$ kubectl get ingress documentserver -o jsonpath="{.status.loadBalancer.ingress[*].ip}" +``` + +If the ingress IP is empty, try getting the `documentserver` ingress hostname: + +```bash +$ kubectl get ingress documentserver -o jsonpath="{.status.loadBalancer.ingress[*].hostname}" +``` + +Associate the `documentserver` ingress IP or hostname with your domain name through your DNS provider. + +After that, ONLYOFFICE Docs will be available at `https://your-domain-name/`. + +### 6. Scale ONLYOFFICE Docs (optional) + +*This step is optional. You can skip step [6](#6-scale-onlyoffice-docs-optional) entirely if you want to use default deployment settings.* + +#### 6.1 Horizontal Pod Autoscaling + +You can enable Autoscaling so that the number of replicas of `documentserver` deployment is calculated automatically based on the values and type of metrics. + +For resource metrics, API metrics.k8s.io must be registered, which is generally provided by [metrics-server](https://github.com/kubernetes-sigs/metrics-server). It can be launched as a cluster add-on. + +To use the target utilization value (`target.type==Utilization`), it is necessary that the values for `resources.requests` are specified in the deployment. + +For more information about Horizontal Pod Autoscaling, see [here](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/). + +To enable HPA for the `documentserver` deployment, specify the `documentserver.autoscaling.enabled=true` parameter. 
+In this case, the `documentserver.replicas` parameter is ignored and the number of replicas is controlled by HPA. + +With the `autoscaling.enabled` parameter enabled, by default Autoscaling will adjust the number of replicas based on the average percentage of CPU Utilization. +For other configurable Autoscaling parameters, see the [Parameters](#4-parameters) table. + +#### 6.2 Manual scaling + +Note: The `documentserver` deployment contains all the necessary dependencies in one pod. + +To scale the `documentserver` deployment, use the following command: + +```bash +$ kubectl scale -n default deployment documentserver --replicas=POD_COUNT +``` + +where `POD_COUNT` is the number of the `documentserver` pods. + +### 7. Update ONLYOFFICE Docs license (optional) + +In order to update the license, you need to perform the following steps: + - Place the license.lic file containing the new key in some directory + - Run the following commands: +```bash +$ kubectl delete secret license -n +$ kubectl create secret generic license --from-file=path/to/license.lic -n +``` + - Restart `documentserver` pods. For example, using the following command: +```bash +$ kubectl delete pod documentserver-*** -n +``` + +### 8. ONLYOFFICE Docs installation test (optional) + +You can test ONLYOFFICE Docs availability and access to connected dependencies by running the following command: + +```bash +$ helm test documentserver -n +``` + +The output should have the following line: + +```bash +Phase: Succeeded +``` + +To view the log of the Pod running as a result of the `helm test` command, run the following command: + +```bash +$ kubectl logs -f test-ds -n +``` + +The ONLYOFFICE Docs availability check is considered a priority, so if it fails with an error, the test is considered failed. 
+ +After this, you can delete the `test-ds` Pod by running the following command: + +```bash +$ kubectl delete pod test-ds -n +``` + +Note: This testing is for informational purposes only and cannot guarantee 100% availability results. +It may be that even though all checks are completed successfully, an error occurs in the application. +In this case, more detailed information can be found in the application logs. + +### 9. Access to the info page (optional) + +The access to `/info` page is limited by default. +In order to allow the access to it, you need to specify the IP addresses or subnets (that will be Proxy container clients in this case) using `proxy.infoAllowedIP` parameter. +Taking into consideration the specifics of Kubernetes net interaction it is possible to get the original IP of the user (being Proxy client) though it's not a standard scenario. +Generally the Pods / Nodes / Load Balancer addresses will actually be the clients, so these addresses are to be used. +In this case the access to the info page will be available to everyone. +You can further limit the access to the `info` page using Nginx [Basic Authentication](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) which you can turn on by setting `proxy.infoAllowedUser` parameter value and by setting the password using `proxy.infoAllowedPassword` parameter, alternatively you can use the existing secret with password by setting its name with `proxy.infoAllowedExistingSecret` parameter. + +### 10. Deploy ONLYOFFICE Docs with your own dependency (optional) + +### 10.1 Use your own Redis + +#### 10.1.1 Connect to Redis using password + +To use your own Redis, you need to disable the Redis/bitnami subchart during ONLYOFFICE Docs deployment and configure `connections.redis` options. 
+ +To deploy ONLYOFFICE Docs and connect it to an existing Redis using `connections.redisHost` and `connections.redisPassword`, run the following command: + +```bash +$ helm install documentserver onlyoffice/docs-shards --set redis.enabled=false --set connections.redisHost=YOUR_REDIS_HOST --set connections.redisPassword=YOUR_SECURE_PWD +``` + +Note: This command will create a secret with the password value that you set, and use the value from this secret to connect to Redis. + +#### 10.1.2 (Alternative) Connect to Redis using existing secret + +Alternatively, you can create a secret with the Redis password yourself and specify the `connections.redisExistingSecret` parameter during ONLYOFFICE Docs deployment, for example: + +```bash +$ helm install documentserver onlyoffice/docs-shards --set redis.enabled=false --set connections.redisHost=YOUR_REDIS_HOST --set connections.redisExistingSecret=YOUR_SECRET_NAME +``` + +Note: In your own secret, the key that contains the password must be named `redis-password`. If this is not the case, override the key name in the secret with the `redisSecretKeyName` parameter. + +### 10.2 Use your own nginx-ingress controller + +**Note:** ONLYOFFICE Docs supports **only** the nginx-ingress controller [by Kubernetes](https://github.com/kubernetes/ingress-nginx). + +If you want to deploy ONLYOFFICE Docs in a cluster where an nginx-ingress controller already exists, please follow the steps below. + +**The first step** is to render two configMap templates with the `helm template` command and apply them. These configMaps are needed for the proper balancing of requests between Docs shards. + +**Note:** These config maps must be located in the same namespace as your nginx-ingress controller deployment. To ensure that the generated config maps will be deployed in the same namespace as your nginx-ingress controller, please set the parameter `documentserver.ingressCustomConfigMapsNamespace` if needed.
+ +**Note:** When creating configMaps manually, check and change if necessary the parameters for connecting to Redis. + +> All available Redis connections parameters present [here](#4-parameters) with the `connections.` prefix + +```bash +helm template docs onlyoffice/docs-shards --set connections.redisPassword= --set documentserver.ingressCustomConfigMapsNamespace= --show-only templates/configmaps/balancer-snippet.yaml --show-only templates/configmaps/balancer-lua.yaml --dry-run=client > ./ingressConfigMaps.yaml +``` + +**The second step**, apply configMaps that you create with command below: + +```bash +$ kubectl apply -f ./ingressConfigMaps.yaml +``` + +**The third step**, you need to update your nginx-ingress controller deployment with new parameters.That will add volumes with the necessary configmaps that you just created. Follow the commands: + +```bash +$ helm upgrade ingress-nginx --repo https://kubernetes.github.io/ingress-nginx -n -f https://raw.githubusercontent.com/ONLYOFFICE/Docs-Shards/sources/ingress_values.yaml +``` + +**Now**, when your nginx-ingress controller if configure, you can deploy ONLYOFFICE Docs with command: + +```bash +$ helm install docs onlyoffice/docs-shards --set ingress-nginx.enabled=false --set redis.master.persistence.storageClass=PERSISTENT_STORAGE_CLASS --set redis.auth.password= +``` + +## Using Grafana to visualize metrics (optional) + +*This step is optional. You can skip this section if you don't want to install Grafana* + +### 1. Deploy Grafana + +Note: It is assumed that step [#6.2](#62-installing-prometheus) has already been completed. 
+ +#### 1.1 Deploy Grafana without installing ready-made dashboards + +*You should skip step [#1.1](#11-deploy-grafana-without-installing-ready-made-dashboards) if you want to Deploy Grafana with the installation of ready-made dashboards* + +To install Grafana to your cluster, run the following command: + +```bash +$ helm install grafana bitnami/grafana \ + --set service.ports.grafana=80 \ + --set config.useGrafanaIniFile=true \ + --set config.grafanaIniConfigMap=grafana-ini \ + --set datasources.secretName=grafana-datasource +``` + +#### 1.2 Deploy Grafana with the installation of ready-made dashboards + +#### 1.2.1 Installing ready-made Grafana dashboards + +To install ready-made Grafana dashboards, set the `grafana.enabled` and `grafana.dashboard.enabled` parameters to `true`. +If ONLYOFFICE Docs is already installed you need to run the `helm upgrade documentserver onlyoffice/docs-shards --set grafana.enabled=true --set grafana.dashboard.enabled=true` command or `helm upgrade documentserver -f ./values.yaml onlyoffice/docs-shards` if the parameters are specified in the [values.yaml](values.yaml) file. +As a result, ready-made dashboards in the `JSON` format will be downloaded from the Grafana [website](https://grafana.com/grafana/dashboards), +the necessary edits will be made to them and configmap will be created from them. A dashboard will also be added to visualize metrics coming from the ONLYOFFICE Docs (it is assumed that step [#6](#6-deploy-statsd-exporter) has already been completed). 
+ +#### 1.2.2 Installing Grafana + +To install Grafana to your cluster, run the following command: + +```bash +$ helm install grafana bitnami/grafana \ + --set service.ports.grafana=80 \ + --set config.useGrafanaIniFile=true \ + --set config.grafanaIniConfigMap=grafana-ini \ + --set datasources.secretName=grafana-datasource \ + --set dashboardsProvider.enabled=true \ + --set dashboardsConfigMaps[0].configMapName=dashboard-node-exporter \ + --set dashboardsConfigMaps[0].fileName=dashboard-node-exporter.json \ + --set dashboardsConfigMaps[1].configMapName=dashboard-deployment \ + --set dashboardsConfigMaps[1].fileName=dashboard-deployment.json \ + --set dashboardsConfigMaps[2].configMapName=dashboard-redis \ + --set dashboardsConfigMaps[2].fileName=dashboard-redis.json \ + --set dashboardsConfigMaps[5].configMapName=dashboard-nginx-ingress \ + --set dashboardsConfigMaps[5].fileName=dashboard-nginx-ingress.json \ + --set dashboardsConfigMaps[6].configMapName=dashboard-documentserver \ + --set dashboardsConfigMaps[6].fileName=dashboard-documentserver.json \ + --set dashboardsConfigMaps[7].configMapName=dashboard-cluster-resourses \ + --set dashboardsConfigMaps[7].fileName=dashboard-cluster-resourses.json +``` + +After executing this command, the following dashboards will be imported into Grafana: + + - Node Exporter + - Deployment Statefulset Daemonset + - Redis Dashboard for Prometheus Redis Exporter + - NGINX Ingress controller + - ONLYOFFICE Docs + - Resource usage by Pods and Containers + +Note: You can see the description of the ONLYOFFICE Docs metrics that are visualized in Grafana [here](https://github.com/ONLYOFFICE/Kubernetes-Docs/wiki/Document-Server-Metrics). + +See more details about installing Grafana via Helm [here](https://github.com/bitnami/charts/tree/master/bitnami/grafana). + +### 2 Access to Grafana via Ingress + +Note: It is assumed that step [#5.3.2.1](#5321-installing-the-kubernetes-nginx-ingress-controller) has already been completed. 
+ +If ONLYOFFICE Docs was installed with the parameter `grafana.ingress.enabled` (step [#5.2](#52-metrics-deployment-optional)) then access to Grafana will be at: `http://INGRESS-ADDRESS/grafana/` + +If Ingres was installed using a secure connection (step [#5.3.2.3](#5323-expose-onlyoffice-docs-via-https)), then access to Grafana will be at: `https://your-domain-name/grafana/` + +### 3. View gathered metrics in Grafana + +Go to the address `http(s)://your-domain-name/grafana/` + +`Login - admin` + +To get the password, run the following command: + +``` +$ kubectl get secret grafana-admin --namespace default -o jsonpath="{.data.GF_SECURITY_ADMIN_PASSWORD}" | base64 --decode +``` + +In the dashboard section, you will see the added dashboards that will display the metrics received from Prometheus. diff --git a/sources/extraScrapeConfigs.yaml b/sources/extraScrapeConfigs.yaml new file mode 100644 index 0000000..192280d --- /dev/null +++ b/sources/extraScrapeConfigs.yaml @@ -0,0 +1,6 @@ +extraScrapeConfigs: | + - job_name: 'statsd' + scrape_interval: 30s + static_configs: + - targets: + - statsd-exporter-prometheus-statsd-exporter:9102 diff --git a/sources/ingress_values.yaml b/sources/ingress_values.yaml new file mode 100644 index 0000000..769bf27 --- /dev/null +++ b/sources/ingress_values.yaml @@ -0,0 +1,19 @@ +controller: + allowSnippetAnnotations: true + extraVolumeMounts: + - name: custom-balancer + mountPath: /etc/nginx/custom_balancer.conf + subPath: custom_balancer.conf + - name: balancer-lua + mountPath: /etc/nginx/lua/balancer.lua + subPath: balancer.lua + extraVolumes: + - name: custom-balancer + configMap: + name: balancer-snippet + - name: balancer-lua + configMap: + name: balancer-lua + publishService: + enabled: true + replicaCount: 2 diff --git a/sources/litmus/deploy-Kubernetes-Docs.sh b/sources/litmus/deploy-Kubernetes-Docs.sh new file mode 100755 index 0000000..e20e227 --- /dev/null +++ b/sources/litmus/deploy-Kubernetes-Docs.sh @@ -0,0 +1,436 @@ 
+#!/usr/bin/env bash + +# Scripts for deploys and check Kuberneted-Docs helm chart + +set -e + +while [ "$1" != "" ]; do + case $1 in + + -tb | --target-branch ) + if [ "$2" != "" ]; then + TARGET_BRANCH=$2 + shift + fi + ;; + + esac + shift +done + +K8S_STORAGE_CLASS="standard" +NFS_PERSISTANCE_SIZE="10Gi" +LITMUS_VERSION="1.13.6" + +WORK_DIR=$(pwd) + +# Create log file for debug +touch ./log.txt + +export TERM=xterm-256color^M + +function common::get_colors() { + COLOR_BLUE=$'\e[34m' + COLOR_GREEN=$'\e[32m' + COLOR_RED=$'\e[31m' + COLOR_RESET=$'\e[0m' + COLOR_YELLOW=$'\e[33m' + export COLOR_BLUE + export COLOR_GREEN + export COLOR_RED + export COLOR_RESET + export COLOR_YELLOW +} + +function k8s_w8_workers() { + for i in {1..20}; do + echo "${COLOR_BLUE}🔨⎈ Get k8s workers status ${i}...${COLOR_RESET}" + local NODES_STATUS=$(kubectl get nodes -o json | jq -r '.items[] | select ( .status.conditions[] | select( .type=="Ready" and .status=="False")) | .metadata.name') + if [[ -z "${NODES_STATUS}" ]]; then + echo "${COLOR_GREEN}☑ OK: K8s workers is ready. Continue...${COLOR_RESET}" + local k8s_workers_ready='true' + break + else + sleep 5 + fi + done + if [[ "${k8s_workers_ready}" != 'true' ]]; then + echo "${COLOR_RED} Something goes wrong. k8s is not ready ${COLOR_RESET}" + exit 1 + fi +} + +function k8s_get_info() { + echo "${COLOR_BLUE}🔨⎈ Get cluster info...${COLOR_RESET}" + kubectl get all + kubectl get ns + kubectl get sc + kubectl get nodes +} + +function k8s_pods_logs() { + ## Get not ready pods + local PODS=$(kubectl get pods --all-namespaces -o go-template='{{ range $item := .items }} + {{ range .status.conditions }} + {{ if (or (and (eq .type "PodScheduled") + (eq .status "False")) (and (eq .type "Ready") + (eq .status "False"))) }} + {{ $item.metadata.name}} {{ end }}{{ end }}{{ end }}') + + ## Get pods logs + if [[ -n ${PODS} ]]; then + echo ${PODS} + echo "${COLOR_RED}⚠ ⚠ ⚠ Attention: looks like some pods is not running. 
Get logs${COLOR_RESET}" + for p in ${PODS}; do + echo "${COLOR_BLUE} 🔨⎈ Get ${p} logs${COLOR_RESET}" + kubectl logs ${p} + done + else + echo "${COLOR_BLUE} 🔨⎈ All pods is ready!${COLOR_RESET}" + fi +} + +function k8s_deploy_deps() { + echo "${COLOR_BLUE}🔨⎈ Add depends helm repos...${COLOR_RESET}" + # Add dependency helm charts + helm repo add kubemonkey https://asobti.github.io/kube-monkey/charts/repo + helm repo add bitnami https://charts.bitnami.com/bitnami + helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx + helm repo add nfs-server-provisioner https://kubernetes-sigs.github.io/nfs-ganesha-server-and-external-provisioner + helm repo add onlyoffice https://download.onlyoffice.com/charts/stable + helm repo update + echo "${COLOR_GREEN}☑ OK: Helm repository was added${COLOR_RESET}" + + echo "${COLOR_BLUE}🔨⎈ Lets deploy dependency...${COLOR_RESET}" + # Install nfs server + helm install nfs-server nfs-server-provisioner/nfs-server-provisioner \ + --set persistence.enabled=true \ + --set persistence.storageClass=${K8S_STORAGE_CLASS} \ + --set persistence.size=${NFS_PERSISTANCE_SIZE} > /dev/null 2>&1 + echo "${COLOR_GREEN}☑ OK: NFS Server was deployed${COLOR_RESET}" + + # Install rabbitmq + helm install rabbitmq bitnami/rabbitmq \ + --set metrics.enabled=false > /dev/null 2>&1 + echo "${COLOR_GREEN}☑ OK: Rabbitmq was deployed${COLOR_RESET}" + + # Install redis + helm install redis bitnami/redis \ + --set architecture=standalone \ + --set metrics.enabled=false > /dev/null 2>&1 + echo "${COLOR_GREEN}☑ OK: Redis was deployed${COLOR_RESET}" + + # Install postgresql + helm install postgresql bitnami/postgresql \ + --set auth.database=postgres \ + --set primary.persistence.size=2G \ + --set metrics.enabled=false > /dev/null 2>&1 + echo "${COLOR_GREEN}☑ OK: Postgresql was deployed${COLOR_RESET}" + } + +function k8s_wait_deps() { + echo "${COLOR_BLUE}🔨⎈ Wait 2 minutes for k8s-Docs dependencies${COLOR_RESET}" + sleep 120 + kubectl get pods + } + +function 
k8s_ct_install() { + local EXIT_CODE=0 + + echo "${COLOR_YELLOW}⚠ Attention: Start ct install test..${COLOR_RESET}" + ct install --chart-dirs . --charts . --helm-extra-set-args "--set=docservice.livenessProbe.periodSeconds=1 \ + --set=docservice.livenessProbe.failureThreshold=1 \ + --set=docservice.livenessProbe.timeoutSeconds=2 \ + --set=docservice.resources.requests.memory="256Mi" \ + --set=docservice.resources.requests.cpu="100m" \ + --set=docservice.resources.limits.memory="2000Mi" \ + --set=docservice.resources.limits.cpu="1" \ + --set=proxy.livenessProbe.periodSeconds=1 \ + --set=proxy.livenessProbe.failureThreshold=1 \ + --set=proxy.livenessProbe.timeoutSeconds=2 \ + --set=proxy.resources.requests.memory="256Mi" \ + --set=proxy.resources.requests.cpu="100m" \ + --set=proxy.resources.limits.memory="2000Mi" \ + --set=proxy.resources.limits.cpu="1" \ + --wait" \ + --skip-clean-up --namespace default || EXIT_CODE=$? + if [[ "${EXIT_CODE}" == 0 ]]; then + local CT_STATUS="success" + # Get release name + RELEASE_NAME=$(helm list | grep docs | awk '{print $1}') + + # Expose docservice for tests + kubectl apply -f ./sources/litmus/docs-node.yaml + + echo + echo "${COLOR_GREEN}👌👌👌⎈ Helm install/test/upgrade successfull finished${COLOR_RESET}" + echo + echo "${COLOR_BLUE}🔨⎈ Get test logs...${COLOR_RESET}" + echo + + kubectl logs -f test-ds --namespace=default + k8s_get_info + else + local CT_STATUS="failed" + + echo + echo "${COLOR_RED}🔥 Helm install\tests\upgrade failed. Get test logs and exit with ${EXIT_CODE}${COLOR_RESET}" + echo + + kubectl logs -f test-ds --namespace=default + k8s_pods_logs || true + k8s_get_info + exit ${EXIT_CODE} + fi + +} + +function k8s_litmus_install () { + echo "${COLOR_BLUE}🔨⎈ Install Litmus Chaos...${COLOR_RESET}" + kubectl apply -f https://litmuschaos.github.io/litmus/litmus-operator-v${LITMUS_VERSION}.yaml + echo + echo "${COLOR_BLUE}🔨⎈ Litmus was deployed with helm. Namespace litmus is created. 
Wait for ready status...${COLOR_RESET}" + echo + local READY_LITMUS_PODS="" + + while [ "${READY_LITMUS_PODS}" == "" ]; do + echo "${COLOR_YELLOW}Litmus is not ready yet, please wait... ${COLOR_RESET}" + READY_LITMUS_PODS=$(kubectl get pods -n litmus | grep Running | awk '{ print $3 }') + sleep 5 + done + + if [ -n "${READY_LITMUS_PODS}" ]; then + echo "${COLOR_GREEN}☑ OK: Litmus is ready ${COLOR_RESET}" + fi + + kubectl get pods --namespace litmus + + echo "${COLOR_BLUE}🔨⎈ Install litmus experiments...${COLOR_RESET}" + tar -zxvf <(curl -sL https://github.com/litmuschaos/chaos-charts/archive/${LITMUS_VERSION}.tar.gz) + kubectl apply -f ./chaos-charts-${LITMUS_VERSION}/charts/generic/experiments.yaml + rm -r ./chaos-charts-${LITMUS_VERSION} +} + +function k8s_docs_status() { + if [ "${1}" == "--get" ]; then + echo "${COLOR_YELLOW}Message from docs_status function:${COLOR_RESET} Start to get status docservice" + bash ./sources/litmus/docs-status.sh > /dev/null 2>&1 & + docs_statusPID=$! + echo "${COLOR_YELLOW}Message from docs_status function:${COLOR_RESET} docs_status script PID is: ${docs_statusPID}" + elif [ "${1}" == "--stop" ]; then + if ps | grep "${docs_statusPID}"; then + echo "${COLOR_YELLOW}Message from docs_status function:${COLOR_RESET} Process is running now. Kill docs-status script with PID: ${docs_statusPID}" + kill ${docs_statusPID} + docs_status_passed+=("${2}") + else + echo "${COLOR_RED}Message from docs_status function:${COLOR_RESET} Process with PID ${docs_statusPID} was not found.
looks like docs_status did not get 200 code" + docs_status_failed+=("${2}") + fi + fi +} + +function k8s_litmus_test() { + # Declare litmus variables + local litmus_path="./sources/litmus" + local litmus_rbac_path="${litmus_path}/rbac" + local litmus_ex_path="${litmus_path}/experiments" + + local litmus_ex_array=($(ls ${litmus_ex_path} | shuf )) + local litmus_rbac_array=($(ls ${litmus_rbac_path})) + + local litmus_ex_name=( + "docs-chaos-pod-delete" + "docs-chaos-pod-cpu-hog" + "docs-chaos-pod-memory-hog" + "docs-chaos-pod-network-latency" + "docs-chaos-container-kill" + "docs-chaos-pod-network-loss" + "docs-chaos-pod-network-duplication") + + # Prepare ex manifests for tests on converter deployment too + # Uncomment if need tests on converter deployment too + + #for ex in "${litmus_ex_array[@]:0:3}"; do + # sed -i 's|app=docservice|app=converter|' ${litmus_ex_path}/${ex} + #done + + # Apply all litmus rbac + for rbac in ${litmus_rbac_array[@]}; do + echo "${COLOR_BLUE}Apply ${rbac}${COLOR_RESET}" + kubectl apply -f ${litmus_rbac_path}/${rbac} + sleep 4 + done + + echo + echo "${COLOR_BLUE}🔨⎈ All rbac for litmus was applied${COLOR_RESET}" + echo + + # Start litmus chaos tests + for ex in ${litmus_ex_name[@]}; do + + echo + echo "${COLOR_BLUE}🔨⎈ Start test: ${ex}${COLOR_RESET}" + echo + + # Start to get docs_status + k8s_docs_status --get + + # Apply litmus chaos test manifest + kubectl apply -f ${litmus_ex_path}/${ex}.yaml + + # Get cluster info + kubectl get pods -n default + + # Wait for Litmus chaos will be injected + sleep 160 + + for i in {1..40}; do + local PHASE="$(kubectl describe chaosresult ${ex} -n default | grep Phase | awk '{print $2}' || true )" + local VERDICT="$(kubectl describe chaosresult ${ex} -n default | grep Verdict | awk '{print $2}' || true )" + if [ "${PHASE}" == "Running" ] || [ "${VERDICT}" == "Awaited" ]; then + echo "${COLOR_BLUE}${i}. 
Test ${ex} is in progress, please wait...${COLOR_RESET}" + sleep 5 + else + if [ "${PHASE}" == "Completed" ] && [ "${VERDICT}" != "Awaited" ]; then + echo "${COLOR_BLUE}Test ${ex} is completed${COLOR_RESET}" + fi + break + fi + done + + local GENERAL_VERDICT="$(kubectl describe chaosresult ${ex} -n default | grep Verdict | awk '{print $2}' || true )" + + if [ "${GENERAL_VERDICT}" == "Pass" ]; then + echo "${COLOR_GREEN}☑ OK: Test ${ex} successfully passed${COLOR_RESET}" + litmus_passed+=("${ex}") + elif [ "${GENERAL_VERDICT}" != "Pass" ]; then + echo "${COLOR_RED}FAILED: Test ${ex} is failed${COLOR_RESET}" + litmus_failed+=("${ex}") + fi + + sleep 10 + echo + echo "${COLOR_BLUE}🔨⎈ Get ${ex} result${COLOR_RESET}" + echo + + # Check test result + kubectl describe chaosresult ${ex} -n default + + # Cleanup all litmus chaosengines + kubectl delete chaosresult ${ex} -n default + kubectl delete chaosengine --all -n default + + now=$(date) + k8s_docs_status --stop ${ex} + echo "${COLOR_BLUE} TEST WAS ENDED AT ${now}${COLOR_RESET}" + # Wait before new test is started + sleep 30 + done + + kubectl get pods --namespace default + + # Test results + if [ -n "${docs_status_passed}" ]; then + for v in ${docs_status_passed[@]}; do + echo "${COLOR_YELLOW}⚠ DOCS STATUS RESULT${COLOR_RESET}: docs_status all time get 200 from docservice pod on litmus test: ${v}" + done + fi + + if [ -n "${docs_status_failed}" ]; then + for v in ${docs_status_failed[@]}; do + echo "${COLOR_RED}⚠ DOCS STATUS RESULT${COLOR_RESET}: docs_status did not get 200 from docservice pod on litmus test: ${v} ${COLOR_RESET}" + done + fi + + if [ -n "${litmus_passed}" ]; then + for v in ${litmus_passed[@]}; do + echo "${COLOR_GREEN}☑ OK: litmus test ${v} successfully Passed${COLOR_RESET}" + done + k8s_helm_test + fi + + if [ -n "${litmus_failed}" ]; then + for v in ${litmus_failed[@]}; do + echo "${COLOR_RED}⚠ FAILED: litmus test ${v} has no Passed verdict${COLOR_RESET}" + done + k8s_helm_test + fi + + if [ -n 
"${litmus_failed}" ] || [ -n "${docs_status_failed}" ]; then + echo "${COLOR_RED} ⚠ ⚠ ATTENTION: Some tests have failed. Please check logs ${COLOR_RESET}" + exit 1 + fi + +} + +function k8s_helm_install() { + echo "${COLOR_BLUE}🔨⎈ Deploy docs in k8s...${COLOR_RESET}" + local EXIT_CODE=0 + helm install ${RELEASE_NAME:="docs"} . --wait || EXIT_CODE=$? + if [[ "${EXIT_CODE}" == 0 ]]; then + sleep 60 + k8s_get_info + echo "${COLOR_BLUE} 🔨⎈ Docs successfully deployed. Continue.. Run Helm test.${COLOR_RESET}" + else + echo "${COLOR_RED}🔥 Docs deploy failed. Exit${COLOR_RESET}" + k8s_get_info + k8s_pods_logs + exit ${EXIT_CODE} + fi + } + +function k8s_helm_test() { + echo "${COLOR_BLUE}🔨⎈ Start helm test..${COLOR_RESET}" + helm test ${RELEASE_NAME} --namespace=default + if [[ $? == 0 ]]; then + echo "${COLOR_GREEN} 👌👌👌⎈ Helm test success! ${COLOR_RESET}" + echo "${COLOR_BLUE} 🔨⎈ Get test logs... ${COLOR_RESET}" + kubectl logs -f test-ds --namespace=default + else + echo "${COLOR_RED} Helm test FAILED. ${COLOR_RESET}" + exit 1 + fi + } + +function k8s_helm_upgrade() { + echo "${COLOR_BLUE}🔨⎈ Start helm upgrade..${COLOR_RESET}" + local EXIT_CODE=0 + helm upgrade ${RELEASE_NAME} . --wait || EXIT_CODE=$? + if [[ "${EXIT_CODE}" == 0 ]]; then + echo "${COLOR_GREEN} 👌👌👌⎈ Helm upgrade success! ${COLOR_RESET}" + k8s_get_info + else + echo "${COLOR_RED} Helm upgrade FAILED.
${COLOR_RESET}" + exit ${EXIT_CODE} + fi +} + +function k8s_helm_test_only() { + # Run only helm install/test/upgrade + # This function will be runed on every created PR + k8s_ct_install +} + +function k8s_all_tests() { + # Run all availiable tests for k8s-Docs helm chart + # This function will be runed only if target branch is master + k8s_litmus_install + k8s_ct_install + k8s_litmus_test +} + +function main () { + common::get_colors + k8s_get_info + k8s_w8_workers + k8s_deploy_deps + k8s_wait_deps + if [ "${TARGET_BRANCH}" == "master" ] || [ "${TARGET_BRANCH}" == "main" ]; then + k8s_all_tests + else + echo "${COLOR_YELLOW}ATTENTION: Target branch is not master, run helm install/upgrade/test only${COLOR_RESET}" + k8s_helm_test_only + fi + } + +main diff --git a/sources/litmus/docs-node.yaml b/sources/litmus/docs-node.yaml new file mode 100644 index 0000000..606e056 --- /dev/null +++ b/sources/litmus/docs-node.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: docs-node +spec: + type: NodePort + selector: + app: docservice + ports: + # By default and for convenience, the `targetPort` is set to the same value as the `port` field. + - port: 80 + targetPort: 8888 + # Optional field + # By default and for convenience, the Kubernetes control plane will allocate a port from a range (default: 30000-32767) + nodePort: 30350 diff --git a/sources/litmus/docs-status.sh b/sources/litmus/docs-status.sh new file mode 100755 index 0000000..17409ce --- /dev/null +++ b/sources/litmus/docs-status.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -e + +DOCSERVICE_URL=$(minikube service docs-node --url) +EXIT_CODE=0 + +while [ "${EXIT_CODE}" == "0" ]; do + curl --fail -I --retry-all-errors --connect-timeout 5 --max-time 40 --retry 3 --retry-delay 10 --retry-max-time 40 ${DOCSERVICE_URL}/index.html || EXIT_CODE=$? + if [ "${EXIT_CODE}" != "0" ]; then + echo "Process failed. 
Exit with exit code: ${EXIT_CODE}" + exit ${EXIT_CODE} + fi + sleep 1 +done diff --git a/sources/litmus/experiments/docs-chaos-container-kill.yaml b/sources/litmus/experiments/docs-chaos-container-kill.yaml new file mode 100644 index 0000000..794774e --- /dev/null +++ b/sources/litmus/experiments/docs-chaos-container-kill.yaml @@ -0,0 +1,41 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: docs-chaos + namespace: default +spec: + # It can be active/stop + engineState: 'active' + appinfo: + appns: 'default' + applabel: 'app=docservice' + appkind: 'deployment' + chaosServiceAccount: container-kill-sa + experiments: + - name: container-kill + spec: + components: + env: + # provide the total chaos duration + - name: TOTAL_CHAOS_DURATION + value: '60' + + # provide the chaos interval + - name: CHAOS_INTERVAL + value: '60' + + # provide the name of container runtime + # for litmus LIB, it supports docker, containerd, crio + # for pumba LIB, it supports docker only + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + - name: PODS_AFFECTED_PERC + value: '' + + - name: TARGET_CONTAINER + value: 'docservice' diff --git a/sources/litmus/experiments/docs-chaos-pod-cpu-hog.yaml b/sources/litmus/experiments/docs-chaos-pod-cpu-hog.yaml new file mode 100644 index 0000000..81fbc41 --- /dev/null +++ b/sources/litmus/experiments/docs-chaos-pod-cpu-hog.yaml @@ -0,0 +1,39 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: docs-chaos + namespace: default +spec: + # It can be active/stop + engineState: 'active' + appinfo: + appns: 'default' + applabel: 'app=docservice' + appkind: 'deployment' + chaosServiceAccount: pod-cpu-hog-sa + experiments: + - name: pod-cpu-hog + spec: + components: + env: + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + - name: CPU_CORES + value: '2' + + ## Percentage of total pods to target + - name: 
PODS_AFFECTED_PERC + value: '' + + ## provide the cluster runtime + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + # procide container name under the chaos + - name: TARGET_CONTAINER + value: 'docservice' diff --git a/sources/litmus/experiments/docs-chaos-pod-delete.yaml b/sources/litmus/experiments/docs-chaos-pod-delete.yaml new file mode 100644 index 0000000..a365342 --- /dev/null +++ b/sources/litmus/experiments/docs-chaos-pod-delete.yaml @@ -0,0 +1,36 @@ +--- +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: docs-chaos + namespace: default +spec: + appinfo: + appns: 'default' + applabel: 'app=docservice' + appkind: 'deployment' + # It can be true/false + annotationCheck: 'false' + # It can be active/stop + engineState: 'active' + #ex. values: ns1:name=percona,ns2:run=nginx + auxiliaryAppInfo: '' + chaosServiceAccount: pod-delete-sa + # It can be delete/retain + jobCleanUpPolicy: 'delete' + experiments: + - name: pod-delete + spec: + components: + env: + # set chaos duration (in sec) as desired + - name: TOTAL_CHAOS_DURATION + value: '30' + + # set chaos interval (in sec) as desired + - name: CHAOS_INTERVAL + value: '10' + + # pod failures without '--force' & default terminationGracePeriodSeconds + - name: FORCE + value: 'false' diff --git a/sources/litmus/experiments/docs-chaos-pod-memory-hog.yaml b/sources/litmus/experiments/docs-chaos-pod-memory-hog.yaml new file mode 100644 index 0000000..9fe0ed8 --- /dev/null +++ b/sources/litmus/experiments/docs-chaos-pod-memory-hog.yaml @@ -0,0 +1,40 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: docs-chaos + namespace: default +spec: + # It can be active/stop + engineState: 'active' + appinfo: + appns: 'default' + applabel: 'app=docservice' + appkind: 'deployment' + chaosServiceAccount: pod-memory-hog-sa + experiments: + - name: pod-memory-hog + spec: + components: + env: + - 
name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + # Enter the amount of memory in megabytes to be consumed by the application pod + - name: MEMORY_CONSUMPTION + value: '3000' + + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' + + ## provide the cluster runtime + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + # provide name of the target container under chaos + - name: TARGET_CONTAINER + value: 'docservice' diff --git a/sources/litmus/experiments/docs-chaos-pod-network-duplication.yaml b/sources/litmus/experiments/docs-chaos-pod-network-duplication.yaml new file mode 100644 index 0000000..5565ad4 --- /dev/null +++ b/sources/litmus/experiments/docs-chaos-pod-network-duplication.yaml @@ -0,0 +1,38 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: docs-chaos + namespace: default +spec: + # It can be active/stop + engineState: 'active' + appinfo: + appns: 'default' + # FYI, To see app label, apply kubectl get pods --show-labels + applabel: 'app=docservice' + appkind: 'deployment' + chaosServiceAccount: pod-network-duplication-sa + experiments: + - name: pod-network-duplication + spec: + components: + env: + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + - name: NETWORK_PACKET_DUPLICATION_PERCENTAGE + value: '100' + + # provide the name of container runtime + # for litmus LIB, it supports docker, containerd, crio + # for pumba LIB, it supports docker only + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' diff --git a/sources/litmus/experiments/docs-chaos-pod-network-latency.yaml b/sources/litmus/experiments/docs-chaos-pod-network-latency.yaml new file mode 100644 index 0000000..f291b64 --- /dev/null +++ 
b/sources/litmus/experiments/docs-chaos-pod-network-latency.yaml @@ -0,0 +1,38 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: docs-chaos + namespace: default +spec: + # It can be active/stop + engineState: 'active' + appinfo: + appns: 'default' + # FYI, To see app label, apply kubectl get pods --show-labels + applabel: 'app=docservice' + appkind: 'deployment' + chaosServiceAccount: pod-network-latency-sa + experiments: + - name: pod-network-latency + spec: + components: + env: + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + - name: NETWORK_LATENCY + value: '5000' + + # provide the name of container runtime + # for litmus LIB, it supports docker, containerd, crio + # for pumba LIB, it supports docker only + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' diff --git a/sources/litmus/experiments/docs-chaos-pod-network-loss.yaml b/sources/litmus/experiments/docs-chaos-pod-network-loss.yaml new file mode 100644 index 0000000..9c2ed06 --- /dev/null +++ b/sources/litmus/experiments/docs-chaos-pod-network-loss.yaml @@ -0,0 +1,38 @@ +apiVersion: litmuschaos.io/v1alpha1 +kind: ChaosEngine +metadata: + name: docs-chaos + namespace: default +spec: + # It can be active/stop + engineState: 'active' + appinfo: + appns: 'default' + # FYI, To see app label, apply kubectl get pods --show-labels + applabel: 'app=docservice' + appkind: 'deployment' + chaosServiceAccount: pod-network-loss-sa + experiments: + - name: pod-network-loss + spec: + components: + env: + - name: TOTAL_CHAOS_DURATION + value: '60' # in seconds + + - name: NETWORK_PACKET_LOSS_PERCENTAGE + value: '100' + + # provide the name of container runtime + # for litmus LIB, it supports docker, containerd, crio + # for pumba LIB, it supports docker only + - name: CONTAINER_RUNTIME + value: 'docker' + + # provide 
the socket file path + - name: SOCKET_PATH + value: '/var/run/docker.sock' + + ## percentage of total pods to target + - name: PODS_AFFECTED_PERC + value: '' diff --git a/sources/litmus/rbac/container-kill-rbac.yaml b/sources/litmus/rbac/container-kill-rbac.yaml new file mode 100644 index 0000000..157fa06 --- /dev/null +++ b/sources/litmus/rbac/container-kill-rbac.yaml @@ -0,0 +1,57 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: container-kill-sa + namespace: default + labels: + name: container-kill-sa + app.kubernetes.io/part-of: litmus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: container-kill-sa + namespace: default + labels: + name: container-kill-sa + app.kubernetes.io/part-of: litmus +rules: +- apiGroups: [""] + resources: ["pods","events"] + verbs: ["create","list","get","patch","update","delete","deletecollection"] +- apiGroups: [""] + resources: ["pods/exec","pods/log","replicationcontrollers"] + verbs: ["list","get","create"] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create","list","get","delete","deletecollection"] +- apiGroups: ["apps"] + resources: ["deployments","statefulsets","daemonsets","replicasets"] + verbs: ["list","get"] +- apiGroups: ["apps.openshift.io"] + resources: ["deploymentconfigs"] + verbs: ["list","get"] +- apiGroups: ["argoproj.io"] + resources: ["rollouts"] + verbs: ["list","get"] +- apiGroups: ["litmuschaos.io"] + resources: ["chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: container-kill-sa + namespace: default + labels: + name: container-kill-sa + app.kubernetes.io/part-of: litmus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: container-kill-sa +subjects: +- kind: ServiceAccount + name: container-kill-sa + namespace: default diff --git a/sources/litmus/rbac/pod-cpu-hog-rbac.yaml 
b/sources/litmus/rbac/pod-cpu-hog-rbac.yaml new file mode 100644 index 0000000..61897a3 --- /dev/null +++ b/sources/litmus/rbac/pod-cpu-hog-rbac.yaml @@ -0,0 +1,57 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-cpu-hog-sa + namespace: default + labels: + name: pod-cpu-hog-sa + app.kubernetes.io/part-of: litmus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pod-cpu-hog-sa + namespace: default + labels: + name: pod-cpu-hog-sa + app.kubernetes.io/part-of: litmus +rules: +- apiGroups: [""] + resources: ["pods","events"] + verbs: ["create","list","get","patch","update","delete","deletecollection"] +- apiGroups: [""] + resources: ["pods/exec","pods/log","replicationcontrollers"] + verbs: ["create","list","get"] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create","list","get","delete","deletecollection"] +- apiGroups: ["apps"] + resources: ["deployments","statefulsets","daemonsets","replicasets"] + verbs: ["list","get"] +- apiGroups: ["apps.openshift.io"] + resources: ["deploymentconfigs"] + verbs: ["list","get"] +- apiGroups: ["argoproj.io"] + resources: ["rollouts"] + verbs: ["list","get"] +- apiGroups: ["litmuschaos.io"] + resources: ["chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod-cpu-hog-sa + namespace: default + labels: + name: pod-cpu-hog-sa + app.kubernetes.io/part-of: litmus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-cpu-hog-sa +subjects: +- kind: ServiceAccount + name: pod-cpu-hog-sa + namespace: default diff --git a/sources/litmus/rbac/pod-delete-rbac.yaml b/sources/litmus/rbac/pod-delete-rbac.yaml new file mode 100644 index 0000000..2890ec1 --- /dev/null +++ b/sources/litmus/rbac/pod-delete-rbac.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-delete-sa + namespace: default + labels: + 
name: pod-delete-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pod-delete-sa + namespace: default + labels: + name: pod-delete-sa +rules: +- apiGroups: [""] + resources: ["pods","events"] + verbs: ["create","list","get","patch","update","delete","deletecollection"] +- apiGroups: [""] + resources: ["pods/exec","pods/log","replicationcontrollers"] + verbs: ["create","list","get"] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create","list","get","delete","deletecollection"] +- apiGroups: ["apps"] + resources: ["deployments","statefulsets","daemonsets","replicasets"] + verbs: ["list","get"] +- apiGroups: ["apps.openshift.io"] + resources: ["deploymentconfigs"] + verbs: ["list","get"] +- apiGroups: ["argoproj.io"] + resources: ["rollouts"] + verbs: ["list","get"] +- apiGroups: ["litmuschaos.io"] + resources: ["chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod-delete-sa + namespace: default + labels: + name: pod-delete-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-delete-sa +subjects: +- kind: ServiceAccount + name: pod-delete-sa + namespace: default diff --git a/sources/litmus/rbac/pod-memory-hog-rbac.yaml b/sources/litmus/rbac/pod-memory-hog-rbac.yaml new file mode 100644 index 0000000..0791426 --- /dev/null +++ b/sources/litmus/rbac/pod-memory-hog-rbac.yaml @@ -0,0 +1,57 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-memory-hog-sa + namespace: default + labels: + name: pod-memory-hog-sa + app.kubernetes.io/part-of: litmus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pod-memory-hog-sa + namespace: default + labels: + name: pod-memory-hog-sa + app.kubernetes.io/part-of: litmus +rules: +- apiGroups: [""] + resources: ["pods","events"] + verbs: 
["create","list","get","patch","update","delete","deletecollection"] +- apiGroups: [""] + resources: ["pods/exec","pods/log","replicationcontrollers"] + verbs: ["create","list","get"] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create","list","get","delete","deletecollection"] +- apiGroups: ["apps"] + resources: ["deployments","statefulsets","daemonsets","replicasets"] + verbs: ["list","get"] +- apiGroups: ["apps.openshift.io"] + resources: ["deploymentconfigs"] + verbs: ["list","get"] +- apiGroups: ["argoproj.io"] + resources: ["rollouts"] + verbs: ["list","get"] +- apiGroups: ["litmuschaos.io"] + resources: ["chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod-memory-hog-sa + namespace: default + labels: + name: pod-memory-hog-sa + app.kubernetes.io/part-of: litmus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-memory-hog-sa +subjects: +- kind: ServiceAccount + name: pod-memory-hog-sa + namespace: default diff --git a/sources/litmus/rbac/pod-network-duplication-rbac.yaml b/sources/litmus/rbac/pod-network-duplication-rbac.yaml new file mode 100644 index 0000000..a65d77f --- /dev/null +++ b/sources/litmus/rbac/pod-network-duplication-rbac.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-network-duplication-sa + namespace: default + labels: + name: pod-network-duplication-sa + app.kubernetes.io/part-of: litmus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pod-network-duplication-sa + namespace: default + labels: + name: pod-network-duplication-sa + app.kubernetes.io/part-of: litmus +rules: +- apiGroups: [""] + resources: ["pods","events"] + verbs: ["create","list","get","patch","update","delete","deletecollection"] +- apiGroups: [""] + resources: ["pods/exec","pods/log","replicationcontrollers"] + verbs: ["create","list","get"] 
+- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create","list","get","delete","deletecollection"] +- apiGroups: ["apps"] + resources: ["deployments","statefulsets","daemonsets","replicasets"] + verbs: ["list","get"] +- apiGroups: ["apps.openshift.io"] + resources: ["deploymentconfigs"] + verbs: ["list","get"] +- apiGroups: ["argoproj.io"] + resources: ["rollouts"] + verbs: ["list","get"] +- apiGroups: ["litmuschaos.io"] + resources: ["chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod-network-duplication-sa + namespace: default + labels: + name: pod-network-duplication-sa + app.kubernetes.io/part-of: litmus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-network-duplication-sa +subjects: +- kind: ServiceAccount + name: pod-network-duplication-sa + namespace: default diff --git a/sources/litmus/rbac/pod-network-latency.yaml b/sources/litmus/rbac/pod-network-latency.yaml new file mode 100644 index 0000000..f3edb70 --- /dev/null +++ b/sources/litmus/rbac/pod-network-latency.yaml @@ -0,0 +1,57 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-network-latency-sa + namespace: default + labels: + name: pod-network-latency-sa + app.kubernetes.io/part-of: litmus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pod-network-latency-sa + namespace: default + labels: + name: pod-network-latency-sa + app.kubernetes.io/part-of: litmus +rules: +- apiGroups: [""] + resources: ["pods","events"] + verbs: ["create","list","get","patch","update","delete","deletecollection"] +- apiGroups: [""] + resources: ["pods/exec","pods/log","replicationcontrollers"] + verbs: ["create","list","get"] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create","list","get","delete","deletecollection"] +- apiGroups: ["apps"] + resources: 
["deployments","statefulsets","daemonsets","replicasets"] + verbs: ["list","get"] +- apiGroups: ["apps.openshift.io"] + resources: ["deploymentconfigs"] + verbs: ["list","get"] +- apiGroups: ["argoproj.io"] + resources: ["rollouts"] + verbs: ["list","get"] +- apiGroups: ["litmuschaos.io"] + resources: ["chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod-network-latency-sa + namespace: default + labels: + name: pod-network-latency-sa + app.kubernetes.io/part-of: litmus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-network-latency-sa +subjects: +- kind: ServiceAccount + name: pod-network-latency-sa + namespace: default diff --git a/sources/litmus/rbac/pod-network-loss-rbac.yaml b/sources/litmus/rbac/pod-network-loss-rbac.yaml new file mode 100644 index 0000000..63736eb --- /dev/null +++ b/sources/litmus/rbac/pod-network-loss-rbac.yaml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-network-loss-sa + namespace: default + labels: + name: pod-network-loss-sa + app.kubernetes.io/part-of: litmus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: pod-network-loss-sa + namespace: default + labels: + name: pod-network-loss-sa + app.kubernetes.io/part-of: litmus +rules: +- apiGroups: [""] + resources: ["pods","events"] + verbs: ["create","list","get","patch","update","delete","deletecollection"] +- apiGroups: [""] + resources: ["pods/exec","pods/log","replicationcontrollers"] + verbs: ["create","list","get"] +- apiGroups: ["batch"] + resources: ["jobs"] + verbs: ["create","list","get","delete","deletecollection"] +- apiGroups: ["apps"] + resources: ["deployments","statefulsets","daemonsets","replicasets"] + verbs: ["list","get"] +- apiGroups: ["apps.openshift.io"] + resources: ["deploymentconfigs"] + verbs: ["list","get"] +- apiGroups: ["argoproj.io"] + 
resources: ["rollouts"] + verbs: ["list","get"] +- apiGroups: ["litmuschaos.io"] + resources: ["chaosengines","chaosexperiments","chaosresults"] + verbs: ["create","list","get","patch","update"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod-network-loss-sa + namespace: default + labels: + name: pod-network-loss-sa + app.kubernetes.io/part-of: litmus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: pod-network-loss-sa +subjects: +- kind: ServiceAccount + name: pod-network-loss-sa + namespace: default diff --git a/sources/metrics/documentserver-statsd-exporter.json b/sources/metrics/documentserver-statsd-exporter.json new file mode 100644 index 0000000..d7a7c6f --- /dev/null +++ b/sources/metrics/documentserver-statsd-exporter.json @@ -0,0 +1,2797 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 8, + "links": [], + "panels": [ + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 22, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "none" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 76, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": 
[], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_expireDoc_connections_edit", + "interval": "", + "legendFormat": "number of connections for editing", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Edit", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "none" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 1 + }, + "hiddenSeries": false, + "id": 78, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_expireDoc_connections_view", + "interval": "", + "legendFormat": "number of connections for viewing", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "View", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + 
}, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "none" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 1 + }, + "hiddenSeries": false, + "id": 80, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_expireDoc_connections_edit + ds_expireDoc_connections_view", + "interval": "", + "legendFormat": "sum of connections for editing and viewing", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Sum", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + 
"alignLevel": null + } + } + ], + "title": "Сonnecting", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 56, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 2 + }, + "hiddenSeries": false, + "id": 52, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ds_coauth_openDocument_open_sum[5m])/rate(ds_coauth_openDocument_open_count[5m])", + "interval": "", + "legendFormat": "moving average time of opening documents (for 5 minutes)", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + 
"defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 2 + }, + "hiddenSeries": false, + "id": 54, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_coauth_openDocument_open", + "interval": "", + "legendFormat": "quantile=\"{{quantile}}\"", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Quantile (0.5, 0.9, 0.99)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 2 + }, + "hiddenSeries": false, + "id": 74, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + 
"alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_coauth_openDocument_open_count - ds_coauth_openDocument_open_count offset 1m", + "interval": "", + "legendFormat": "number of open documents", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Opening Documents", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 2 + }, + "id": 10, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": {}, + "custom": {}, + "thresholds": { + "mode": "absolute", + "steps": [] + }, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 3 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": 
"flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ds_conv_downloadFile_sum[5m])/rate(ds_conv_downloadFile_count[5m])", + "interval": "", + "legendFormat": "moving average time of downloading documents (for 5 minutes)", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 3 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_conv_downloadFile", + "interval": "", + "legendFormat": "quantile=\"{{quantile}}\"", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": 
"Quantile (0.5, 0.9, 0.99)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 3 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_conv_downloadFile_count - ds_conv_downloadFile_count offset 1m", + "interval": "", + "legendFormat": "number of downloaded files", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, 
+ "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Downloading Documents", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 12, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 4 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ds_conv_allconvert_sum[5m])/rate(ds_conv_allconvert_count[5m])", + "interval": "", + "legendFormat": "moving average time of converting documents (for 5 minutes)", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 
10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 4 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_conv_allconvert", + "interval": "", + "legendFormat": "quantile=\"{{quantile}}\"", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "quantile (0.5, 0.9, 0.99)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 4 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + 
"linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_conv_allconvert_count - ds_conv_allconvert_count offset 1m", + "interval": "", + "legendFormat": "number of conversions", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Converting Documents", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 32, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 5 + }, + "hiddenSeries": false, + "id": 30, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": 
[], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ds_conv_spawnSync_sum[5m])/rate(ds_conv_spawnSync_count[5m])", + "interval": "", + "legendFormat": "moving average time of converting process (for 5 minutes)", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 5 + }, + "hiddenSeries": false, + "id": 28, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_conv_spawnSync", + "interval": "", + "legendFormat": "quantile=\"{{quantile}}\"", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Quantile (0.5, 0.9, 0.99)", + "tooltip": { 
+ "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 5 + }, + "hiddenSeries": false, + "id": 26, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_conv_spawnSync_count - ds_conv_spawnSync_count offset 1m", + "interval": "", + "legendFormat": "number of conversion process", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + 
} + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Converting Process", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 5 + }, + "id": 48, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 6 + }, + "hiddenSeries": false, + "id": 44, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ds_coauth_data_auth_sum[5m])/rate(ds_coauth_data_auth_count[5m])", + "interval": "", + "legendFormat": "moving average time of completing authorization (for 5 minutes)", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": 
"Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 6 + }, + "hiddenSeries": false, + "id": 46, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_coauth_data_auth", + "interval": "", + "legendFormat": "quantile=\"{{quantile}}\"", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Quantile (0.5, 0.9, 0.99)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 6 + }, + "hiddenSeries": false, + "id": 42, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": 
"null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_coauth_data_auth_count - ds_coauth_data_auth_count offset 1m", + "interval": "", + "legendFormat": "number of authorizations", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Authorizations", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 6 + }, + "id": 64, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 60, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": 
false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ds_coauth_data_getLock_sum[5m])/rate(ds_coauth_data_getLock_count[5m])", + "interval": "", + "legendFormat": "moving average time of getLock duration (for 5 minutes)", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + "id": 62, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_coauth_data_getLock", + "interval": "", + "legendFormat": "quantile=\"{{quantile}}\"", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Quantile (0.5, 0.9, 0.99)", + "tooltip": { + "shared": true, + 
"sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 7 + }, + "hiddenSeries": false, + "id": 58, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_coauth_data_getLock_count - ds_coauth_data_getLock_count offset 1m", + "interval": "", + "legendFormat": "number of getLocks", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { 
+ "align": false, + "alignLevel": null + } + } + ], + "title": "Get Lock", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 40, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 36, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(ds_coauth_data_saveChanges_sum[5m])/rate(ds_coauth_data_saveChanges_count[5m])", + "interval": "", + "legendFormat": "moving average time of saving changes (for 5 minutes)", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": 
{ + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 8 + }, + "hiddenSeries": false, + "id": 38, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_coauth_data_saveChanges", + "interval": "", + "legendFormat": "quantile=\"{{quantile}}\"", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Quantile (0.5, 0.9, 0.99)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 8 + }, + "hiddenSeries": false, + "id": 34, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + 
"alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_coauth_data_saveChanges_count - ds_coauth_data_saveChanges_count offset 1m", + "interval": "", + "legendFormat": "number of saved changes ", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Saving Changes", + "type": "row" + }, + { + "collapsed": true, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 72, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 9 + }, + "hiddenSeries": false, + "id": 70, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + 
"steppedLine": false, + "targets": [ + { + "expr": "rate(ds_coauth_openDocument_imgurls_sum[5m])/rate(ds_coauth_openDocument_imgurls_count[5m])", + "interval": "", + "legendFormat": "moving average time to opening documents with uploading images (for 5 minutes)", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 8, + "y": 9 + }, + "hiddenSeries": false, + "id": 68, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_coauth_openDocument_imgurls", + "interval": "", + "legendFormat": "quantile=\"{{quantile}}\"", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Quantile (0.5, 0.9, 0.99)", 
+ "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 8, + "x": 16, + "y": 9 + }, + "hiddenSeries": false, + "id": 66, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.4.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "ds_coauth_openDocument_imgurls_count - ds_coauth_openDocument_imgurls_count offset 1m", + "interval": "", + "legendFormat": "the number of opened documents with the uploaded images", + "queryType": "randomWalk", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Count", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": 
null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Uploading Images", + "type": "row" + } + ], + "refresh": "30s", + "schemaVersion": 27, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Statsd DS", + "uid": "LDjoK2UGz", + "version": 24 +} \ No newline at end of file diff --git a/sources/metrics/kubernetes-cluster-resourses.json b/sources/metrics/kubernetes-cluster-resourses.json new file mode 100644 index 0000000..b3304ae --- /dev/null +++ b/sources/metrics/kubernetes-cluster-resourses.json @@ -0,0 +1,2118 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": 315, + "graphTooltip": 0, + "id": 1, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 33, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Network I/O pressure", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + 
"legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 3600000, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 32, + "links": [], + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (rate (container_network_receive_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[1m]))", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Received", + "metric": "network", + "refId": "A", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "- sum (rate (container_network_transmit_bytes_total{kubernetes_io_hostname=~\"^$Node$\"}[1m]))", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Sent", + "metric": "network", + "refId": "B", + "step": 10 + } + ], + "title": "Network I/O pressure", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 34, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Total usage", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + 
"mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)" + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 65 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 8 + }, + "id": 4, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Cluster memory usage", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)" + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 65 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 8 + }, + "id": 6, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", 
+ "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (rate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) / sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Cluster CPU usage (1m avg)", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)" + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 65 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 8 + }, + "id": 7, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) / sum (container_fs_limit_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) * 100", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 10 + } + ], + "title": "Cluster filesystem usage", + "type": "gauge" 
+ }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 13 + }, + "id": 9, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (container_memory_working_set_bytes{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Used", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 4, + "y": 13 + }, + "id": 10, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + 
"pluginVersion": "9.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (machine_memory_bytes{kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Total", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 8, + "y": 13 + }, + "id": 11, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (rate (container_cpu_usage_seconds_total{id=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m]))", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Used", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 
4, + "x": 12, + "y": 13 + }, + "id": 12, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (machine_cpu_cores{kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Total", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 16, + "y": 13 + }, + "id": 13, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (container_fs_usage_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Used", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 
2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 20, + "y": 13 + }, + "id": 14, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.1.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (container_fs_limit_bytes{device=~\"^/dev/[sv]d[a-z][1-9]$\",id=\"/\",kubernetes_io_hostname=~\"^$Node$\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "title": "Total", + "type": "stat" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 35, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Pods CPU usage", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", 
+ "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 17, + "links": [], + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "sum (rate (container_cpu_usage_seconds_total{namespace!=\"kube-system\", image!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (pod)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "metric": "container_cpu", + "range": true, + "refId": "A", + "step": 10 + } + ], + "title": "Pods CPU usage (1m avg)", + "type": "timeseries" + }, + { + "collapsed": true, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 36, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "decimals": 3, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 24 + }, + "height": "", + "id": 23, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "stack": false, + "steppedLine": true, + "targets": [ + { + 
"datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (rate (container_cpu_usage_seconds_total{systemd_service_name!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (systemd_service_name)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ systemd_service_name }}", + "metric": "container_cpu", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "title": "System services CPU usage (1m avg)", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ] + } + ], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "System services CPU usage", + "type": "row" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 37, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Containers CPU usage", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": true, + "stacking": { + "group": "A", + "mode": 
"none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 24, + "links": [], + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum (rate (container_cpu_usage_seconds_total{namespace!=\"kube-system\",image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (container)", + "hide": false, + "instant": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ container }}", + "metric": "container_cpu", + "range": true, + "refId": "B", + "step": 10 + } + ], + "title": "Containers CPU usage (1m avg)", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 33 + }, + "id": 38, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "All processes CPU usage", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + 
"type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 20, + "links": [], + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (rate (container_cpu_usage_seconds_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_cpu", + "refId": "A", + "step": 10 + } + ], + "title": "All processes CPU usage (1m avg)", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 48 + }, + "id": 39, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Pods memory usage", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": 
{ + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 49 + }, + "id": 25, + "links": [], + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "sum (container_memory_working_set_bytes{namespace!=\"kube-system\",image!=\"\",kubernetes_io_hostname=~\"^$Node$\"}) by (pod)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ pod_name }}", + "metric": "container_memory_usage:sort_desc", + "range": true, + "refId": "A", + "step": 10 + } + ], + "title": "Pods memory usage", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 56 + }, + "id": 41, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Containers memory usage", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 
1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 57 + }, + "id": 27, + "links": [], + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum (container_memory_working_set_bytes{namespace!=\"kube-system\",image!=\"\",name!~\"^k8s_.*\",kubernetes_io_hostname=~\"^$Node$\"}) by (container)", + "hide": false, + "instant": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ container }}", + "metric": "container_memory_usage:sort_desc", + "range": true, + "refId": "B", + "step": 10 + } + ], + "title": "Containers memory usage", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 64 + }, + "id": 42, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "All processes memory usage", + "type": "row" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "decimals": 2, + "editable": true, + "error": false, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 65 + }, + "hiddenSeries": false, + "id": 28, + "isNew": 
true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.6", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": true, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (container_memory_working_set_bytes{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}) by (id)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ id }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "All processes memory usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 79 + }, + "id": 43, + "panels": [], + "targets": [ + { + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "Pods network I/O", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 80 + }, + "id": 16, + "links": [], + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "sum (rate (container_network_receive_bytes_total{namespace!=\"kube-system\",image!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (pod)", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> {{ pod }}", + "metric": "network", + "range": true, + "refId": "A", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "- sum (rate (container_network_transmit_bytes_total{namespace!=\"kube-system\",image!=\"\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (pod)", + "hide": false, + "legendFormat": "<- {{pod}}", + "range": true, + "refId": "B" + } + ], + "title": "Pods network I/O (1m avg)", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 87 + }, + "id": 45, + "panels": [], + "targets": [ + { + 
"datasource": { + "type": "datasource", + "uid": "grafana" + }, + "refId": "A" + } + ], + "title": "All processes network I/O", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 88 + }, + "id": 29, + "links": [], + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "sum (rate (container_network_receive_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "-> {{ id }}", + "metric": "network", + "refId": "A", + "step": 10 + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "expr": "- sum (rate (container_network_transmit_bytes_total{id!=\"/\",kubernetes_io_hostname=~\"^$Node$\"}[1m])) by (id)", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "<- {{ id }}", + "metric": "network", + "refId": "B", + 
"step": 10 + } + ], + "title": "All processes network I/O (1m avg)", + "type": "timeseries" + } + ], + "refresh": "10s", + "schemaVersion": 37, + "style": "dark", + "tags": [ + "kubernetes" + ], + "templating": { + "list": [ + { + "allValue": ".*", + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "definition": "", + "hide": 0, + "includeAll": true, + "multi": false, + "name": "Node", + "options": [], + "query": { + "query": "label_values(kubernetes_io_hostname)", + "refId": "Prometheus-Node-Variable-Query" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Kubernetes cluster resourses", + "uid": "PeW0nbn4z", + "version": 8, + "weekStart": "" +} diff --git a/sources/scc/docs-components.yaml b/sources/scc/docs-components.yaml new file mode 100644 index 0000000..b72476d --- /dev/null +++ b/sources/scc/docs-components.yaml @@ -0,0 +1,21 @@ +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: scc-docs-components +allowPrivilegedContainer: false +runAsUser: + type: MustRunAs + uid: 101 +seLinuxContext: + type: MustRunAs +fsGroup: + type: MustRunAs + ranges: + - max: 101 + min: 101 +supplementalGroups: + type: MustRunAs +users: [] +groups: [] +seccompProfiles: +- runtime/default diff --git a/sources/scc/helm-components.yaml b/sources/scc/helm-components.yaml new file mode 100644 index 0000000..2612531 --- /dev/null +++ b/sources/scc/helm-components.yaml @@ -0,0 +1,21 @@ +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + name: 
scc-helm-components +allowPrivilegedContainer: false +runAsUser: + type: MustRunAs + uid: 1001 +seLinuxContext: + type: MustRunAs +fsGroup: + type: MustRunAs + ranges: + - max: 1001 + min: 1001 +supplementalGroups: + type: MustRunAs +users: [] +groups: [] +seccompProfiles: +- runtime/default diff --git a/sources/scripts/add_shardkey.py b/sources/scripts/add_shardkey.py new file mode 100644 index 0000000..ffdb7d6 --- /dev/null +++ b/sources/scripts/add_shardkey.py @@ -0,0 +1,143 @@ +import os +import sys +import logging + +redisConnectorName = os.environ.get('REDIS_CONNECTOR_NAME') +redisHost = os.environ.get('REDIS_SERVER_HOST') +redisPort = os.environ.get('REDIS_SERVER_PORT') +redisUser = os.environ.get('REDIS_SERVER_USER') +redisPassword = os.environ.get('REDIS_SERVER_PWD') +redisDBNum = os.environ.get('REDIS_SERVER_DB_KEYS_NUM') +redisConnectTimeout = 15 +if os.environ.get('REDIS_CLUSTER_NODES'): + redisClusterNodes = list(os.environ.get('REDIS_CLUSTER_NODES').split(" ")) + redisClusterNode = redisClusterNodes[0].split(":")[0] + redisClusterPort = redisClusterNodes[0].split(":")[1] +if redisConnectorName == 'ioredis': + redisSentinelGroupName = os.environ.get('REDIS_SENTINEL_GROUP_NAME') + +shardKey = os.environ.get('DEFAULT_SHARD_KEY') +epIP = os.environ.get('SHARD_IP') +epPort = os.environ.get('SHARD_PORT') +ipShard = epIP + ':' + epPort + +total_result = {} + + +def init_logger(name): + logger = logging.getLogger(name) + formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + logger.setLevel(logging.DEBUG) + stdout = logging.StreamHandler() + stdout.setFormatter(logging.Formatter(formatter)) + stdout.setLevel(logging.DEBUG) + logger.addHandler(stdout) + logger.info('Running the Redis initialization script with the "shardKey" value for the replica\n') + + +def get_redis_status(): + import redis + global rc + try: + rc = redis.Redis( + host=redisHost, + port=redisPort, + db=redisDBNum, + password=redisPassword, + username=redisUser, + 
socket_connect_timeout=redisConnectTimeout, + retry_on_timeout=True + ) + rc.ping() + except Exception as msg_redis: + logger_test_ds.error(f'Failed to check the availability of the Redis Standalone... {msg_redis}\n') + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('Successful connection to Redis Standalone') + return rc.ping() + + +def get_redis_cluster_status(): + from redis.cluster import RedisCluster as Redis + from redis.cluster import ClusterNode + global rc + try: + nodes = [ClusterNode(redisClusterNode, redisClusterPort)] + rc = Redis( + startup_nodes=nodes, + username=redisUser, + password=redisPassword, + socket_connect_timeout=redisConnectTimeout, + retry_on_timeout=True + ) + rc.ping() + except Exception as msg_redis: + logger_test_ds.error(f'Failed to check the availability of the Redis Cluster... {msg_redis}\n') + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('Successful connection to Redis Cluster') + return rc.ping() + + +def get_redis_sentinel_status(): + import redis + from redis import Sentinel + global rc + try: + sentinel = Sentinel([(redisHost, redisPort)], socket_timeout=redisConnectTimeout) + master_host, master_port = sentinel.discover_master(redisSentinelGroupName) + rc = redis.Redis( + host=master_host, + port=master_port, + db=redisDBNum, + password=redisPassword, + username=redisUser, + socket_connect_timeout=redisConnectTimeout, + retry_on_timeout=True + ) + rc.ping() + except Exception as msg_redis: + logger_test_ds.error(f'Failed to check the availability of the Redis Sentinel... 
{msg_redis}\n') + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('Successful connection to Redis Sentinel') + return rc.ping() + + +def add_redis_key(): + try: + rc.set(shardKey, ipShard) + rc.append(ipShard, f' {shardKey}') + test_key = rc.get(shardKey).decode('utf-8') + logger_test_ds.info(f'Shard Key Endpoint: {shardKey} = {test_key}') + except Exception as msg_check_redis: + logger_test_ds.error(f'Error when trying to write a ShardKey to Redis... {msg_check_redis}\n') + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('The ShardKey was successfully recorded to Redis\n') + rc.close() + + +def init_redis(): + logger_test_ds.info('Checking Redis availability...') + if redisConnectorName == 'redis' and not os.environ.get('REDIS_CLUSTER_NODES'): + if get_redis_status() is True: + add_redis_key() + elif redisConnectorName == 'redis' and os.environ.get('REDIS_CLUSTER_NODES'): + if get_redis_cluster_status() is True: + add_redis_key() + elif redisConnectorName == 'ioredis': + if get_redis_sentinel_status() is True: + add_redis_key() + + +def total_status(): + if 'Failed' in total_result.values(): + logger_test_ds.error('Recording of "ShardKey" in Redis failed') + sys.exit(1) + + +init_logger('test') +logger_test_ds = logging.getLogger('test.ds') +init_redis() +total_status() diff --git a/sources/scripts/createdb.sql b/sources/scripts/createdb.sql new file mode 100644 index 0000000..0d05452 --- /dev/null +++ b/sources/scripts/createdb.sql @@ -0,0 +1,73 @@ +-- +-- Create schema onlyoffice +-- + +-- CREATE DATABASE onlyoffice ENCODING = 'UTF8' CONNECTION LIMIT = -1; + +-- ---------------------------- +-- Table structure for doc_changes +-- ---------------------------- +CREATE TABLE IF NOT EXISTS "public"."doc_changes" ( +"tenant" varchar(255) COLLATE "default" NOT NULL, +"id" varchar(255) COLLATE "default" NOT NULL, +"change_id" int4 NOT NULL, +"user_id" varchar(255) COLLATE "default" NOT NULL, +"user_id_original" varchar(255) 
COLLATE "default" NOT NULL, +"user_name" varchar(255) COLLATE "default" NOT NULL, +"change_data" text COLLATE "default" NOT NULL, +"change_date" timestamp without time zone NOT NULL, +PRIMARY KEY ("tenant", "id", "change_id") +) +WITH (OIDS=FALSE); + +-- ---------------------------- +-- Table structure for task_result +-- ---------------------------- +CREATE TABLE IF NOT EXISTS "public"."task_result" ( +"tenant" varchar(255) COLLATE "default" NOT NULL, +"id" varchar(255) COLLATE "default" NOT NULL, +"status" int2 NOT NULL, +"status_info" int4 NOT NULL, +"created_at" timestamp without time zone DEFAULT NOW(), +"last_open_date" timestamp without time zone NOT NULL, +"user_index" int4 NOT NULL DEFAULT 1, +"change_id" int4 NOT NULL DEFAULT 0, +"callback" text COLLATE "default" NOT NULL, +"baseurl" text COLLATE "default" NOT NULL, +"password" text COLLATE "default" NULL, +"additional" text COLLATE "default" NULL, +PRIMARY KEY ("tenant", "id") +) +WITH (OIDS=FALSE); + +CREATE OR REPLACE FUNCTION merge_db(_tenant varchar(255), _id varchar(255), _status int2, _status_info int4, _last_open_date timestamp without time zone, _user_index int4, _change_id int4, _callback text, _baseurl text, OUT isupdate char(5), OUT userindex int4) AS +$$ +DECLARE + t_var "public"."task_result"."user_index"%TYPE; +BEGIN + LOOP + -- first try to update the key + -- note that "a" must be unique + IF ((_callback <> '') IS TRUE) AND ((_baseurl <> '') IS TRUE) THEN + UPDATE "public"."task_result" SET last_open_date=_last_open_date, user_index=user_index+1,callback=_callback,baseurl=_baseurl WHERE tenant = _tenant AND id = _id RETURNING user_index into userindex; + ELSE + UPDATE "public"."task_result" SET last_open_date=_last_open_date, user_index=user_index+1 WHERE tenant = _tenant AND id = _id RETURNING user_index into userindex; + END IF; + IF found THEN + isupdate := 'true'; + RETURN; + END IF; + -- not there, so try to insert the key + -- if someone else inserts the same key concurrently, + -- 
we could get a unique-key failure + BEGIN + INSERT INTO "public"."task_result"(tenant, id, status, status_info, last_open_date, user_index, change_id, callback, baseurl) VALUES(_tenant, _id, _status, _status_info, _last_open_date, _user_index, _change_id, _callback, _baseurl) RETURNING user_index into userindex; + isupdate := 'false'; + RETURN; + EXCEPTION WHEN unique_violation THEN + -- do nothing, and loop to try the UPDATE again + END; + END LOOP; +END; +$$ +LANGUAGE plpgsql; diff --git a/sources/scripts/get_logs.sh b/sources/scripts/get_logs.sh new file mode 100755 index 0000000..a8e1f4b --- /dev/null +++ b/sources/scripts/get_logs.sh @@ -0,0 +1,4 @@ +#!/bin/bash +for i in `kubectl get pod | grep -i documentserver | awk '{print $1}'`; do + kubectl logs $i > $i.txt +done diff --git a/sources/scripts/remove_shardkey.py b/sources/scripts/remove_shardkey.py new file mode 100644 index 0000000..e712703 --- /dev/null +++ b/sources/scripts/remove_shardkey.py @@ -0,0 +1,161 @@ +import os +import sys +import subprocess +import logging + +redisConnectorName = os.environ.get('REDIS_CONNECTOR_NAME') +redisHost = os.environ.get('REDIS_SERVER_HOST') +redisPort = os.environ.get('REDIS_SERVER_PORT') +redisUser = os.environ.get('REDIS_SERVER_USER') +redisPassword = os.environ.get('REDIS_SERVER_PWD') +redisDBNum = os.environ.get('REDIS_SERVER_DB_KEYS_NUM') +redisConnectTimeout = 15 +if os.environ.get('REDIS_CLUSTER_NODES'): + redisClusterNodes = list(os.environ.get('REDIS_CLUSTER_NODES').split(" ")) + redisClusterNode = redisClusterNodes[0].split(":")[0] + redisClusterPort = redisClusterNodes[0].split(":")[1] +if redisConnectorName == 'ioredis': + redisSentinelGroupName = os.environ.get('REDIS_SENTINEL_GROUP_NAME') + +shardKey = os.environ.get('DEFAULT_SHARD_KEY') +epIP = os.environ.get('SHARD_IP') +epPort = os.environ.get('SHARD_PORT') +ipShard = epIP + ':' + epPort + +total_result = {} + + +def init_logger(name): + logger = logging.getLogger(name) + formatter = '%(asctime)s - 
%(name)s - %(levelname)s - %(message)s' + logger.setLevel(logging.DEBUG) + stdout = logging.StreamHandler() + stdout.setFormatter(logging.Formatter(formatter)) + stdout.setLevel(logging.DEBUG) + logger.addHandler(stdout) + logger.info('Running a script to shutdown the Shard and clear the keys belonging to it from Redis\n') + + +def get_redis_status(): + import redis + global rc + try: + rc = redis.Redis( + host=redisHost, + port=redisPort, + db=redisDBNum, + password=redisPassword, + username=redisUser, + socket_connect_timeout=redisConnectTimeout, + retry_on_timeout=True + ) + rc.ping() + except Exception as msg_redis: + logger_test_ds.error('Failed to check the availability of the Redis Standalone... {}\n'.format(msg_redis)) + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('Successful connection to Redis Standalone') + return rc.ping() + + +def get_redis_cluster_status(): + from redis.cluster import RedisCluster as Redis + from redis.cluster import ClusterNode + global rc + try: + nodes = [ClusterNode(redisClusterNode, redisClusterPort)] + rc = Redis( + startup_nodes=nodes, + username=redisUser, + password=redisPassword, + socket_connect_timeout=redisConnectTimeout, + retry_on_timeout=True + ) + rc.ping() + except Exception as msg_redis: + logger_test_ds.error('Failed to check the availability of the Redis Cluster... 
{}\n'.format(msg_redis)) + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('Successful connection to Redis Cluster') + return rc.ping() + + +def get_redis_sentinel_status(): + import redis + from redis import Sentinel + global rc + try: + sentinel = Sentinel([(redisHost, redisPort)], socket_timeout=redisConnectTimeout) + master_host, master_port = sentinel.discover_master(redisSentinelGroupName) + rc = redis.Redis( + host=master_host, + port=master_port, + db=redisDBNum, + password=redisPassword, + username=redisUser, + socket_connect_timeout=redisConnectTimeout, + retry_on_timeout=True + ) + rc.ping() + except Exception as msg_redis: + logger_test_ds.error('Failed to check the availability of the Redis Sentinel... {}\n'.format(msg_redis)) + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('Successful connection to Redis Sentinel') + return rc.ping() + + +def clear_shard_key(): + if rc.exists(ipShard): + try: + get_keys_shard = rc.get(ipShard).decode('utf-8') + keys_shard = get_keys_shard.split() + pipe = rc.pipeline() + for key in keys_shard: + pipe.delete(key) + pipe.execute() + rc.delete(ipShard) + except Exception as msg_check_redis: + logger_test_ds.error('Error when trying to delete keys belonging to the {sk} shard from Redis... 
{em}\n'.format(sk=shardKey, em=msg_check_redis)) + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('Keys belonging to {} have been successfully deleted from Redis\n'.format(shardKey)) + rc.close() + else: + logger_test_ds.info('Endpoint shard {} was not found in Redis\n'.format(shardKey)) + + +def clear_redis(): + logger_test_ds.info('Checking Redis availability...') + if redisConnectorName == 'redis' and not os.environ.get('REDIS_CLUSTER_NODES'): + if get_redis_status() is True: + clear_shard_key() + elif redisConnectorName == 'redis' and os.environ.get('REDIS_CLUSTER_NODES'): + if get_redis_cluster_status() is True: + clear_shard_key() + elif redisConnectorName == 'ioredis': + if get_redis_sentinel_status() is True: + clear_shard_key() + + +def shutdown_shard(): + shutdown_cmd = ["/bin/bash", "-c", "curl http://localhost:8000/internal/cluster/inactive -X PUT -s"] + process = subprocess.Popen(shutdown_cmd, stdout=subprocess.PIPE) + shutdown_result = process.communicate()[0].decode('utf-8') + if shutdown_result == "true": + clear_redis() + else: + logger_test_ds.error('The {} shard could not be disabled'.format(shardKey)) + sys.exit(1) + + +def total_status(): + if 'Failed' in total_result.values(): + logger_test_ds.error('Could not clear Redis of keys belonging to {}'.format(shardKey)) + sys.exit(1) + + +init_logger('test') +logger_test_ds = logging.getLogger('test.ds') +shutdown_shard() +total_status() diff --git a/sources/scripts/test_ds.py b/sources/scripts/test_ds.py new file mode 100755 index 0000000..b2dbf17 --- /dev/null +++ b/sources/scripts/test_ds.py @@ -0,0 +1,166 @@ +import os +import sys +import subprocess +import time +import logging + +url = 'http://docservice:8000/healthcheck' + +redisConnectorName = os.environ.get('REDIS_CONNECTOR_NAME') +redisHost = os.environ.get('REDIS_SERVER_HOST') +redisPort = os.environ.get('REDIS_SERVER_PORT') +redisUser = os.environ.get('REDIS_SERVER_USER') +redisPassword = 
os.environ.get('REDIS_SERVER_PWD') +redisDBNum = os.environ.get('REDIS_SERVER_DB_NUM') +redisConnectTimeout = 15 +if os.environ.get('REDIS_CLUSTER_NODES'): + redisClusterNodes = list(os.environ.get('REDIS_CLUSTER_NODES').split(" ")) + redisClusterNode = redisClusterNodes[0].split(":")[0] + redisClusterPort = redisClusterNodes[0].split(":")[1] +if redisConnectorName == 'ioredis': + redisSentinelGroupName = os.environ.get('REDIS_SENTINEL_GROUP_NAME') + +total_result = {} + + +def init_logger(name): + logger = logging.getLogger(name) + formatter = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + logger.setLevel(logging.DEBUG) + stdout = logging.StreamHandler() + stdout.setFormatter(logging.Formatter(formatter)) + stdout.setLevel(logging.DEBUG) + logger.addHandler(stdout) + logger.info('Running a script to test the availability of DocumentServer and dependencies\n') + + +def get_redis_status(): + import redis + global rc + try: + rc = redis.Redis( + host=redisHost, + port=redisPort, + db=redisDBNum, + password=redisPassword, + username=redisUser, + socket_connect_timeout=redisConnectTimeout, + retry_on_timeout=True + ) + rc.ping() + except Exception as msg_redis: + logger_test_ds.error(f'Failed to check the availability of the Redis Standalone... {msg_redis}\n') + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('Successful connection to Redis Standalone') + return rc.ping() + + +def get_redis_cluster_status(): + from redis.cluster import RedisCluster as Redis + from redis.cluster import ClusterNode + global rc + try: + nodes = [ClusterNode(redisClusterNode, redisClusterPort)] + rc = Redis( + startup_nodes=nodes, + username=redisUser, + password=redisPassword, + socket_connect_timeout=redisConnectTimeout, + retry_on_timeout=True + ) + rc.ping() + except Exception as msg_redis: + logger_test_ds.error(f'Failed to check the availability of the Redis Cluster... 
{msg_redis}\n') + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('Successful connection to Redis Cluster') + return rc.ping() + + +def get_redis_sentinel_status(): + import redis + from redis import Sentinel + global rc + try: + sentinel = Sentinel([(redisHost, redisPort)], socket_timeout=redisConnectTimeout) + master_host, master_port = sentinel.discover_master(redisSentinelGroupName) + rc = redis.Redis( + host=master_host, + port=master_port, + db=redisDBNum, + password=redisPassword, + username=redisUser, + socket_connect_timeout=redisConnectTimeout, + retry_on_timeout=True + ) + rc.ping() + except Exception as msg_redis: + logger_test_ds.error(f'Failed to check the availability of the Redis Sentinel... {msg_redis}\n') + total_result['CheckRedis'] = 'Failed' + else: + logger_test_ds.info('Successful connection to Redis Sentinel') + return rc.ping() + + +def check_redis_key(): + try: + rc.set('testDocsServer', 'ok') + test_key = rc.get('testDocsServer').decode('utf-8') + logger_test_ds.info(f'Test Key: {test_key}') + except Exception as msg_check_redis: + logger_test_ds.error(f'Error when trying to write a key to Redis... 
{msg_check_redis}\n') + total_result['CheckRedis'] = 'Failed' + else: + rc.delete('testDocsServer') + logger_test_ds.info('The test key was successfully recorded and deleted from Redis\n') + rc.close() + total_result['CheckRedis'] = 'Success' + + +def check_redis(): + logger_test_ds.info('Checking Redis availability...') + if redisConnectorName == 'redis' and not os.environ.get('REDIS_CLUSTER_NODES'): + if get_redis_status() is True: + check_redis_key() + elif redisConnectorName == 'redis' and os.environ.get('REDIS_CLUSTER_NODES'): + if get_redis_cluster_status() is True: + check_redis_key() + elif redisConnectorName == 'ioredis': + if get_redis_sentinel_status() is True: + check_redis_key() + + +def get_ds_status(): + import requests + from requests.adapters import HTTPAdapter + logger_test_ds.info('Checking DocumentServer availability...') + ds_adapter = HTTPAdapter(max_retries=3) + ds_session = requests.Session() + ds_session.mount(url, ds_adapter) + try: + response = ds_session.get(url, timeout=15) + except Exception as msg_url: + logger_test_ds.error(f'Failed to check the availability of the DocumentServer... 
{msg_url}\n') + total_result['CheckDS'] = 'Failed' + else: + logger_test_ds.info(f'The DocumentServer is available: {response.text}\n') + if response.text == 'true': + total_result['CheckDS'] = 'Success' + else: + total_result['CheckDS'] = 'Failed' + + +def total_status(): + logger_test_ds.info('As a result of the check, the following results were obtained:') + for key, value in total_result.items(): + logger_test_ds.info(f'{key} = {value}') + if total_result['CheckDS'] != 'Success': + sys.exit(1) + + +init_logger('test') +logger_test_ds = logging.getLogger('test.ds') +check_redis() +get_ds_status() +total_status() diff --git a/sources/shutdown-ds.yaml b/sources/shutdown-ds.yaml new file mode 100644 index 0000000..7a388a9 --- /dev/null +++ b/sources/shutdown-ds.yaml @@ -0,0 +1,39 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: shutdown-ds +spec: + backoffLimit: 2 + template: + spec: + volumes: + - name: shutdown-ds + configMap: + name: shutdown-ds + defaultMode: 0755 + containers: + - name: shutdown-ds + image: onlyoffice/docs-utils:8.0.1-1 + command: ["/bin/sh", "-c"] + args: ["/scripts/stop.sh"] + volumeMounts: + - name: shutdown-ds + mountPath: /scripts/stop.sh + subPath: stop.sh + restartPolicy: Never + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: shutdown-ds +data: + stop.sh: |- + #!/bin/bash + curlout="$(curl -v http://docservice:8000/internal/cluster/inactive -X PUT -s)" + if [[ "${curlout}" != "true" ]]; then + echo -e "\e[0;31m The server could not be disabled \e[0m" + exit 1 + else + echo work done + fi diff --git a/templates/NOTES.txt b/templates/NOTES.txt new file mode 100644 index 0000000..d90d702 --- /dev/null +++ b/templates/NOTES.txt @@ -0,0 +1,19 @@ +RELEASE VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if .Values.example.enabled }} + +Example was marked for installation. + Note: Example is intended for editors testing purposes only and must be disabled before launching the editors in production. 
+ +{{- end }} + +{{- if .Values.tests.enabled }} + +You can test ONLYOFFICE Docs availability and access to connected dependencies using Helm test: + $ helm test {{ .Release.Name }} --namespace={{ .Release.Namespace }} + To view the log of the Pod, run the following command: + $ kubectl logs -f test-ds --namespace={{ .Release.Namespace }} + Note: This testing is for informational purposes only and cannot guarantee 100% availability results + +{{- end }} diff --git a/templates/RBAC/dashboard-role.yaml b/templates/RBAC/dashboard-role.yaml new file mode 100644 index 0000000..93d24b8 --- /dev/null +++ b/templates/RBAC/dashboard-role.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.grafana.enabled .Values.grafana.dashboard.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: dashboard-role + namespace: {{ include "ds.grafana.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "2" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["list", "get", "create", "update"] +{{- end }} diff --git a/templates/RBAC/dashboard-rolebinding.yaml b/templates/RBAC/dashboard-rolebinding.yaml new file mode 100644 index 0000000..0cab8a4 --- /dev/null +++ b/templates/RBAC/dashboard-rolebinding.yaml @@ -0,0 +1,26 @@ +{{- if and .Values.grafana.enabled .Values.grafana.dashboard.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: dashboard-binding + namespace: {{ include "ds.grafana.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "3" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +subjects: +- kind: ServiceAccount + name: dashboard-sa + namespace: {{ include "ds.grafana.namespace" . | quote }} +roleRef: + kind: Role + name: dashboard-role + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/templates/_helpers.tpl b/templates/_helpers.tpl new file mode 100644 index 0000000..157523b --- /dev/null +++ b/templates/_helpers.tpl @@ -0,0 +1,248 @@ +{{/* +Get the Redis password secret +*/}} +{{- define "ds.redis.secretName" -}} +{{- if or .Values.connections.redisPassword .Values.connections.redisNoPass -}} + {{- printf "%s-redis" .Release.Name -}} +{{- else if and (not .Values.redis.enabled) (.Values.connections.redisExistingSecret) -}} + {{- printf "%s" (tpl .Values.connections.redisExistingSecret $) -}} +{{- else if and .Values.redis.enabled .Values.redis.auth.password -}} + {{- printf "%s-%s" .Release.Name (tpl .Values.connections.redisExistingSecret $) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the redis password +*/}} +{{- define "ds.redis.pass" -}} +{{- $redisSecret := include "ds.redis.secretName" . 
}} +{{- $secretKey := (lookup "v1" "Secret" .Release.Namespace $redisSecret).data }} +{{- $keyValue := (get $secretKey .Values.connections.redisSecretKeyName) | b64dec }} +{{- if .Values.connections.redisPassword -}} + {{- printf "%s" .Values.connections.redisPassword -}} +{{- else if and .Values.redis.enabled .Values.redis.auth.password -}} + {{- printf "%s" .Values.redis.auth.password -}} +{{- else if $keyValue -}} + {{- printf "%s" $keyValue -}} +{{- end -}} +{{- end -}} + +{{/* +Return ds release name prefix for redis host name if redis subchart was deployed +*/}} +{{- define "ds.redis.subchart.prefix" -}} +{{- if .Values.redis.enabled -}} + {{- printf "%s-" .Release.Name -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for Redis +*/}} +{{- define "ds.redis.createSecret" -}} +{{- if or .Values.connections.redisPassword .Values.connections.redisNoPass (not .Values.connections.redisExistingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis password +*/}} +{{- define "ds.redis.password" -}} +{{- if not (empty .Values.connections.redisPassword) }} + {{- .Values.connections.redisPassword }} +{{- else if .Values.connections.redisNoPass }} + {{- printf "" }} +{{- else }} + {{- required "A Redis Password is required!" 
.Values.connections.redisPassword }} +{{- end }} +{{- end -}} + +{{/* +Get the info auth password secret +*/}} +{{- define "ds.info.secretName" -}} +{{- if .Values.documentserver.proxy.infoAllowedExistingSecret -}} + {{- printf "%s" (tpl .Values.documentserver.proxy.infoAllowedExistingSecret $) -}} +{{- else if .Values.documentserver.proxy.infoAllowedPassword -}} + {{- printf "%s-info-auth" .Release.Name -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for info auth +*/}} +{{- define "ds.info.createSecret" -}} +{{- if and .Values.documentserver.proxy.infoAllowedUser (not .Values.documentserver.proxy.infoAllowedExistingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return info auth password +*/}} +{{- define "ds.info.password" -}} +{{- if not (empty .Values.documentserver.proxy.infoAllowedPassword) }} + {{- .Values.documentserver.proxy.infoAllowedPassword }} +{{- else }} + {{- required "A info auth Password is required!" .Values.documentserver.proxy.infoAllowedPassword }} +{{- end }} +{{- end -}} + +{{/* +Get the PVC name +*/}} +{{- define "ds.pvc.name" -}} +{{- if .Values.persistence.existingClaim -}} + {{- printf "%s" (tpl .Values.persistence.existingClaim $) -}} +{{- else }} + {{- printf "ds-service-files" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a pvc object should be created +*/}} +{{- define "ds.pvc.create" -}} +{{- if empty .Values.persistence.existingClaim }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the license name +*/}} +{{- define "ds.license.secretName" -}} +{{- if .Values.license.existingSecret -}} + {{- printf "%s" (tpl .Values.license.existingSecret $) -}} +{{- else }} + {{- printf "license" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for license +*/}} +{{- define "ds.license.createSecret" -}} +{{- if and (empty .Values.license.existingSecret) (empty .Values.license.existingClaim) }} + {{- true -}} +{{- end -}} +{{- end -}} + 
+{{/* +Get the jwt name +*/}} +{{- define "ds.jwt.secretName" -}} +{{- if .Values.jwt.existingSecret -}} + {{- printf "%s" (tpl .Values.jwt.existingSecret $) -}} +{{- else }} + {{- printf "jwt" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created for jwt +*/}} +{{- define "ds.jwt.createSecret" -}} +{{- if empty .Values.jwt.existingSecret }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the service name for ds +*/}} +{{- define "ds.svc.name" -}} +{{- if .Values.service.existing -}} + {{- printf "%s" (tpl .Values.service.existing $) -}} +{{- else }} + {{- printf "documentserver" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a service object should be created for ds +*/}} +{{- define "ds.svc.create" -}} +{{- if empty .Values.service.existing }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the ds labels +*/}} +{{- define "ds.labels.commonLabels" -}} +{{- range $key, $value := .Values.commonLabels }} +{{ $key }}: {{ tpl $value $ }} +{{- end }} +{{- end -}} + +{{/* +Get the ds annotations +*/}} +{{- define "ds.annotations.commonAnnotations" -}} +{{- $annotations := toYaml .keyName }} +{{- if contains "{{" $annotations }} + {{- tpl $annotations .context }} +{{- else }} + {{- $annotations }} +{{- end }} +{{- end -}} + +{{/* +Get the ds Service Account name +*/}} +{{- define "ds.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default .Release.Name .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the ds Namespace +*/}} +{{- define "ds.namespace" -}} +{{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} +{{- else -}} + {{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Get the ds Grafana Namespace +*/}} +{{- define "ds.grafana.namespace" -}} +{{- if .Values.grafana.namespace -}} + {{- .Values.grafana.namespace -}} +{{- else if .Values.namespaceOverride -}} + {{- 
.Values.namespaceOverride -}} +{{- else -}} + {{- .Release.Namespace -}} +{{- end -}} +{{- end -}} + +{{/* +Get the ds virtual path +*/}} +{{- define "ds.ingress.path" -}} +{{- if eq .Values.ingress.path "/" -}} + {{- printf "/" -}} +{{- else }} + {{- printf "%s(/|$)(.*)" .Values.ingress.path -}} +{{- end -}} +{{- end -}} + +{{/* +Get ds url for example +*/}} +{{- define "ds.example.dsUrl" -}} +{{- if and (ne .Values.ingress.path "/") (eq .Values.example.dsUrl "/") -}} + {{- printf "%s/" (tpl .Values.ingress.path $) -}} +{{- else }} + {{- printf "%s" (tpl .Values.example.dsUrl $) -}} +{{- end -}} +{{- end -}} diff --git a/templates/configmaps/add-shardkey.yaml b/templates/configmaps/add-shardkey.yaml new file mode 100644 index 0000000..9eeacb2 --- /dev/null +++ b/templates/configmaps/add-shardkey.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: add-shardkey + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{ (.Files.Glob "sources/scripts/add_shardkey.py").AsConfig | indent 2 }} diff --git a/templates/configmaps/balancer-lua.yaml b/templates/configmaps/balancer-lua.yaml new file mode 100644 index 0000000..344b366 --- /dev/null +++ b/templates/configmaps/balancer-lua.yaml @@ -0,0 +1,395 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: balancer-lua + namespace: {{ .Values.documentserver.ingressCustomConfigMapsNamespace }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + annotations: + helm.sh/resource-policy: keep + helm.sh/hook: pre-install + helm.sh/hook-weight: "1" + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + balancer.lua: | + local ngx_balancer = require("ngx.balancer") + local cjson = require("cjson.safe") + local util = require("util") + local dns_lookup = require("util.dns").lookup + local configuration = require("configuration") + local round_robin = require("balancer.round_robin") + local chash = require("balancer.chash") + local chashsubset = require("balancer.chashsubset") + local sticky_balanced = require("balancer.sticky_balanced") + local sticky_persistent = require("balancer.sticky_persistent") + local ewma = require("balancer.ewma") + local string = string + local ipairs = ipairs + local table = table + local getmetatable = getmetatable + local tostring = tostring + local pairs = pairs + local math = math + local ngx = ngx + + -- measured in seconds + -- for an Nginx worker to pick up the new list of upstream peers + -- it will take + BACKENDS_SYNC_INTERVAL + local BACKENDS_SYNC_INTERVAL = 1 + + local DEFAULT_LB_ALG = "round_robin" + local IMPLEMENTATIONS = { + round_robin = round_robin, + chash = chash, + chashsubset = chashsubset, + sticky_balanced = sticky_balanced, + sticky_persistent = sticky_persistent, + ewma = ewma, + } + + local PROHIBITED_LOCALHOST_PORT = configuration.prohibited_localhost_port or '10246' + local PROHIBITED_PEER_PATTERN = "^127.*:" .. PROHIBITED_LOCALHOST_PORT .. 
"$" + + local _M = {} + local balancers = {} + local backends_with_external_name = {} + local backends_last_synced_at = 0 + + local function get_implementation(backend) + local name = backend["load-balance"] or DEFAULT_LB_ALG + + if backend["sessionAffinityConfig"] and + backend["sessionAffinityConfig"]["name"] == "cookie" then + if backend["sessionAffinityConfig"]["mode"] == "persistent" then + name = "sticky_persistent" + else + name = "sticky_balanced" + end + + elseif backend["upstreamHashByConfig"] and + backend["upstreamHashByConfig"]["upstream-hash-by"] then + if backend["upstreamHashByConfig"]["upstream-hash-by-subset"] then + name = "chashsubset" + else + name = "chash" + end + end + + local implementation = IMPLEMENTATIONS[name] + if not implementation then + ngx.log(ngx.WARN, backend["load-balance"], " is not supported, ", + "falling back to ", DEFAULT_LB_ALG) + implementation = IMPLEMENTATIONS[DEFAULT_LB_ALG] + end + + return implementation + end + + local function resolve_external_names(original_backend) + local backend = util.deepcopy(original_backend) + local endpoints = {} + for _, endpoint in ipairs(backend.endpoints) do + local ips = dns_lookup(endpoint.address) + for _, ip in ipairs(ips) do + table.insert(endpoints, { address = ip, port = endpoint.port }) + end + end + backend.endpoints = endpoints + return backend + end + + local function format_ipv6_endpoints(endpoints) + local formatted_endpoints = {} + for _, endpoint in ipairs(endpoints) do + local formatted_endpoint = endpoint + if not endpoint.address:match("^%d+.%d+.%d+.%d+$") then + formatted_endpoint.address = string.format("[%s]", endpoint.address) + end + table.insert(formatted_endpoints, formatted_endpoint) + end + return formatted_endpoints + end + + local function is_backend_with_external_name(backend) + local serv_type = backend.service and backend.service.spec + and backend.service.spec["type"] + return serv_type == "ExternalName" + end + + local function sync_backend(backend) + 
if not backend.endpoints or #backend.endpoints == 0 then + balancers[backend.name] = nil + return + end + + if is_backend_with_external_name(backend) then + backend = resolve_external_names(backend) + end + + backend.endpoints = format_ipv6_endpoints(backend.endpoints) + + local implementation = get_implementation(backend) + local balancer = balancers[backend.name] + + if not balancer then + balancers[backend.name] = implementation:new(backend) + return + end + + -- every implementation is the metatable of its instances (see .new(...) functions) + -- here we check if `balancer` is the instance of `implementation` + -- if it is not then we deduce LB algorithm has changed for the backend + if getmetatable(balancer) ~= implementation then + ngx.log(ngx.INFO, + string.format("LB algorithm changed from %s to %s, resetting the instance", + balancer.name, implementation.name)) + balancers[backend.name] = implementation:new(backend) + return + end + + balancer:sync(backend) + end + + local function sync_backends_with_external_name() + for _, backend_with_external_name in pairs(backends_with_external_name) do + sync_backend(backend_with_external_name) + end + end + + local function sync_backends() + local raw_backends_last_synced_at = configuration.get_raw_backends_last_synced_at() + if raw_backends_last_synced_at <= backends_last_synced_at then + return + end + + local backends_data = configuration.get_backends_data() + if not backends_data then + balancers = {} + return + end + + local new_backends, err = cjson.decode(backends_data) + if not new_backends then + ngx.log(ngx.ERR, "could not parse backends data: ", err) + return + end + + local balancers_to_keep = {} + for _, new_backend in ipairs(new_backends) do + if is_backend_with_external_name(new_backend) then + local backend_with_external_name = util.deepcopy(new_backend) + backends_with_external_name[backend_with_external_name.name] = backend_with_external_name + else + sync_backend(new_backend) + end + 
balancers_to_keep[new_backend.name] = true + end + + for backend_name, _ in pairs(balancers) do + if not balancers_to_keep[backend_name] then + balancers[backend_name] = nil + backends_with_external_name[backend_name] = nil + end + end + backends_last_synced_at = raw_backends_last_synced_at + end + + local function route_to_alternative_balancer(balancer) + if balancer.is_affinitized(balancer) then + -- If request is already affinitized to a primary balancer, keep the primary balancer. + return false + end + + if not balancer.alternative_backends then + return false + end + + -- TODO: support traffic shaping for n > 1 alternative backends + local backend_name = balancer.alternative_backends[1] + if not backend_name then + ngx.log(ngx.ERR, "empty alternative backend") + return false + end + + local alternative_balancer = balancers[backend_name] + if not alternative_balancer then + ngx.log(ngx.ERR, "no alternative balancer for backend: ", + tostring(backend_name)) + return false + end + + if alternative_balancer.is_affinitized(alternative_balancer) then + -- If request is affinitized to an alternative balancer, instruct caller to + -- switch to alternative. + return true + end + + -- Use traffic shaping policy, if request didn't have affinity set. + local traffic_shaping_policy = alternative_balancer.traffic_shaping_policy + if not traffic_shaping_policy then + ngx.log(ngx.ERR, "traffic shaping policy is not set for balancer ", + "of backend: ", tostring(backend_name)) + return false + end + + local target_header = util.replace_special_char(traffic_shaping_policy.header, + "-", "_") + local header = ngx.var["http_" .. 
target_header] + if header then + if traffic_shaping_policy.headerValue + and #traffic_shaping_policy.headerValue > 0 then + if traffic_shaping_policy.headerValue == header then + return true + end + elseif traffic_shaping_policy.headerPattern + and #traffic_shaping_policy.headerPattern > 0 then + local m, err = ngx.re.match(header, traffic_shaping_policy.headerPattern) + if m then + return true + elseif err then + ngx.log(ngx.ERR, "error when matching canary-by-header-pattern: '", + traffic_shaping_policy.headerPattern, "', error: ", err) + return false + end + elseif header == "always" then + return true + elseif header == "never" then + return false + end + end + + local target_cookie = traffic_shaping_policy.cookie + local cookie = ngx.var["cookie_" .. target_cookie] + if cookie then + if cookie == "always" then + return true + elseif cookie == "never" then + return false + end + end + + local weightTotal = 100 + if traffic_shaping_policy.weightTotal ~= nil and traffic_shaping_policy.weightTotal > 100 then + weightTotal = traffic_shaping_policy.weightTotal + end + if math.random(weightTotal) <= traffic_shaping_policy.weight then + return true + end + + return false + end + + local function get_balancer_by_upstream_name(upstream_name) + return balancers[upstream_name] + end + + local function get_balancer() + if ngx.ctx.balancer then + return ngx.ctx.balancer + end + + local backend_name = ngx.var.proxy_upstream_name + + local balancer = balancers[backend_name] + if not balancer then + return nil + end + + if route_to_alternative_balancer(balancer) then + local alternative_backend_name = balancer.alternative_backends[1] + ngx.var.proxy_alternative_upstream_name = alternative_backend_name + + balancer = balancers[alternative_backend_name] + end + + ngx.ctx.balancer = balancer + + return balancer + end + + function _M.init_worker() + -- when worker starts, sync non ExternalName backends without delay + sync_backends() + -- we call sync_backends_with_external_name 
in timer because for endpoints that require + -- DNS resolution it needs to use socket which is not available in + -- init_worker phase + local ok, err = ngx.timer.at(0, sync_backends_with_external_name) + if not ok then + ngx.log(ngx.ERR, "failed to create timer: ", err) + end + + ok, err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends) + if not ok then + ngx.log(ngx.ERR, "error when setting up timer.every for sync_backends: ", err) + end + ok, err = ngx.timer.every(BACKENDS_SYNC_INTERVAL, sync_backends_with_external_name) + if not ok then + ngx.log(ngx.ERR, "error when setting up timer.every for sync_backends_with_external_name: ", + err) + end + end + + function _M.rewrite() + local balancer = get_balancer() + if not balancer then + ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE + return ngx.exit(ngx.status) + end + end + + function _M.balance() + local balancer = get_balancer() + if not balancer then + return + end + + local peer = balancer:balance() + if not peer then + ngx.log(ngx.WARN, "no peer was returned, balancer: " .. 
balancer.name) + return + end + + if peer:match(PROHIBITED_PEER_PATTERN) then + ngx.log(ngx.ERR, "attempted to proxy to self, balancer: ", balancer.name, ", peer: ", peer) + return + end + + ngx_balancer.set_more_tries(1) + if (ngx.var.service_name == {{ default "documentserver" .Values.service.existing | quote }} and (ngx.var.arg_WOPISrc or ngx.var.arg_shardkey)) then + return peer + else + local ok, err = ngx_balancer.set_current_peer(peer) + if not ok then + ngx.log(ngx.ERR, "error while setting current upstream peer ", peer, + ": ", err) + end + end + end + + function _M.log() + local balancer = get_balancer() + if not balancer then + return + end + + if not balancer.after_balance then + return + end + + balancer:after_balance() + end + + setmetatable(_M, {__index = { + get_implementation = get_implementation, + sync_backend = sync_backend, + route_to_alternative_balancer = route_to_alternative_balancer, + get_balancer = get_balancer, + get_balancer_by_upstream_name = get_balancer_by_upstream_name, + }}) + + return _M diff --git a/templates/configmaps/balancer-snippet.yaml b/templates/configmaps/balancer-snippet.yaml new file mode 100644 index 0000000..1effff7 --- /dev/null +++ b/templates/configmaps/balancer-snippet.yaml @@ -0,0 +1,230 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: balancer-snippet + namespace: {{ .Values.documentserver.ingressCustomConfigMapsNamespace }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + annotations: + helm.sh/resource-policy: keep + helm.sh/hook: pre-install + helm.sh/hook-weight: "1" + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom_balancer.conf: | + access_by_lua_block { + if ngx.var.service_name == {{ default "documentserver" .Values.service.existing | quote }} then + local WOPISrc = ngx.var.arg_WOPISrc + local shardkey = ngx.var.arg_shardkey + local service_name = ngx.var.service_name + local redis = require "resty.redis" + local red = redis:new() + local cjson = require("cjson.safe") + + local function redis_del(key) + local ok, err = red:del(string.format("%s", key)) + if not ok then + ngx.say("failed to del: ", err) + return + end + end + + local function redis_get(wopi) + local user_data_response = red:get(string.format('%s', wopi)) + return user_data_response + end + + local function redis_set(wopi, endpoint) + local response = red:setnx(string.format('%s', wopi), endpoint) + if response == 1 then + print(string.format("DEBUG: --> New api key %s was set in Redis", wopi)) + return true + end + end + + local function redis_set_ipkey(wopi, endpoint) + -- add spaces before add value in key + -- keys should be like " wopi1 wopi2 wopi3" + -- if dont make the space it will be "wopi1wopi2wopi3" + + local wopi_final = (string.format(" %s", wopi)) + local ok, err = red:append(string.format('%s', endpoint), wopi_final) + if not ok then + ngx.say("failed to set: ",err) + return + end + end + + local function redis_expire(wopi, expire) + local ok, err = red:expire(string.format('%s', wopi), expire) + if not ok then + ngx.say("failed to set ttl: ",err) + return + end + end + + local function get_endpoints(backends, upstream) + for _, new_backend in ipairs(backends) do + if new_backend.name == upstream then + local new_endpoints=(new_backend.endpoints) + return new_endpoints + end 
+ end + end + + local function table_contains(tbl, p, x) + local found = false + for _, v in pairs(tbl) do + local endpoint_string = (string.format("%s:%s", v, p)) + if endpoint_string == x then + local found = true + return found + end + end + return found + end + + local function check_endpoint(endpoint) + local configuration = require("configuration") + local docs_upstream = ngx.var.proxy_upstream_name + local service_port = ngx.var.service_port + local backends_data = configuration.get_backends_data() + local backends = cjson.decode(backends_data) + local endpoints = get_endpoints(backends, docs_upstream) + local endpoints_table = {} + + for _, endpoint in ipairs(endpoints) do + table.insert(endpoints_table, endpoint.address) + end + print(cjson.encode(endpoints_table)) + + local result = table_contains(endpoints_table, service_port, endpoint) + + return result + end + + local function get_docs_mode(wopi) + if string.match(wopi, "http://") or string.match(wopi, "https://") then + return "wopi" + else + return "api" + end + end + + local function get_api_arg() + if WOPISrc then + return WOPISrc + end + if shardkey then + return shardkey + end + end + + local function handle_api_key(arg) + if shardkey then + return shardkey + end + if WOPISrc then + local WOPIDecoded = (ngx.unescape_uri(arg)) + local WOPIkey = WOPIDecoded:gsub("%s+", "") + return WOPIkey + end + end + + local API_ARG = get_api_arg() + + if API_ARG then + local API_KEY = handle_api_key(API_ARG) + red:set_timeouts(1000, 1000, 1000) -- 1 sec + local ok, err = red:connect("{{ include "ds.redis.subchart.prefix" . }}{{ .Values.connections.redisHost }}", {{ .Values.connections.redisPort }}) + if not ok then + ngx.say("1: failed to connect: ",err) + return + end + + {{- if eq .Values.connections.redisNoPass false }} + local res, err = red:auth({{ include "ds.redis.pass" . 
| quote }}) + if not res then + ngx.say("failed to authenticate: ", err) + return + end + {{- end }} + + {{- if ne .Values.documentserver.keysRedisDBNum "0" }} + red:select({{ .Values.documentserver.keysRedisDBNum }}) + {{- end }} + + local exist_endpoint = tostring(redis_get(API_KEY)) + print(exist_endpoint) + if exist_endpoint == 'userdata: NULL' then + local new_custom_endpoint = balancer.balance() + if redis_set(API_KEY, new_custom_endpoint) then + redis_set_ipkey(API_KEY, new_custom_endpoint) + redis_expire(API_KEY, {{ .Values.documentserver.keysExpireTime }}) + ngx.var.custom_endpoint = new_custom_endpoint + else + print("DEBUG: --> Looks like parallel request was made, get endpoint from Redis") + ngx.var.custom_endpoint = tostring(redis_get(API_KEY)) + end + else + local endpoint_found = check_endpoint(exist_endpoint) + print(endpoint_found) + if endpoint_found == false then + print(string.format("ENDPOINT WILL BE REMOVE:%s", exist_endpoint)) + local placeholder = tostring(red:get(string.format("del_%s", exist_endpoint))) + if placeholder == 'userdata: NULL' then + local default_expire = {{ .Values.documentserver.terminationGracePeriodSeconds }} + local placeholder_expire = default_expire + 10 + local set_placeholder = red:set(string.format("del_%s", exist_endpoint), "placeholder") + local set_placeholder_expire = red:expire(string.format("del_%s", exist_endpoint), placeholder_expire) + local keys = tostring(redis_get(exist_endpoint)) + red:init_pipeline() + for i in string.gmatch(keys, "%S+") do + red:expire(string.format('%s', i), default_expire) + end + local results, err = red:commit_pipeline() + if not results then + ngx.say("failed to commit the pipelined requests: ", err) + end + -- Set expire for endpoint key that consist all wopi that also will be removed after expire + local set_endpoint_expire = redis_expire(exist_endpoint, default_expire) + local set_wopi_expire = redis_expire(API_KEY, default_expire) + print("DEBUG: --> Keys remove process is 
started, send request to upstream") + ngx.var.custom_endpoint = exist_endpoint + else + print("DEBUG: --> Process that reshard keys already exist, send request to upstream") + ngx.var.custom_endpoint = exist_endpoint + end + else + print("DEBUG: --> Endpoint exist, just go forward...") + ngx.var.custom_endpoint = exist_endpoint + if WOPISrc then + redis_expire(API_KEY, {{ .Values.documentserver.keysExpireTime }}) + end + end + end + end + print(ngx.var.custom_endpoint) + red:close() + end + } + + if ($service_name = {{ default "documentserver" .Values.service.existing }}) { + set $docs_shardkey $arg_shardkey; + } + + if ($service_name = {{ default "documentserver" .Values.service.existing }}) { + set $docs_wopisrc $arg_WOPISrc; + } + + if ($docs_shardkey) { + proxy_pass http://$custom_endpoint; + } + + if ($docs_wopisrc) { + proxy_pass http://$custom_endpoint; + } diff --git a/templates/configmaps/config.yaml b/templates/configmaps/config.yaml new file mode 100644 index 0000000..f3664a9 --- /dev/null +++ b/templates/configmaps/config.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: local-config + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + local.json: | + { + "services": { + "CoAuthoring": { + "server":{ + "editorDataStorage": "editorDataMemory", + "editorStatStorage": "editorDataRedis" + } + } + } + } diff --git a/templates/configmaps/createdb.yaml b/templates/configmaps/createdb.yaml new file mode 100644 index 0000000..d11ec84 --- /dev/null +++ b/templates/configmaps/createdb.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: createdb + namespace: {{ include "ds.namespace" . 
| quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{ (.Files.Glob "sources/scripts/createdb.sql").AsConfig | indent 2 }} diff --git a/templates/configmaps/dashboard.yaml b/templates/configmaps/dashboard.yaml new file mode 100644 index 0000000..962dd3f --- /dev/null +++ b/templates/configmaps/dashboard.yaml @@ -0,0 +1,37 @@ +{{- if and .Values.grafana.enabled .Values.grafana.dashboard.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-dashboard + namespace: {{ include "ds.grafana.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "4" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + get_dashboard.sh: |- + #!/bin/bash + CM=(dashboard-node-exporter dashboard-deployment dashboard-nginx-ingress dashboard-documentserver dashboard-cluster-resourses) + wget https://grafana.com/api/dashboards/1860/revisions/22/download -O /scripts/dashboard-node-exporter.json + wget https://grafana.com/api/dashboards/8588/revisions/1/download -O /scripts/dashboard-deployment.json + wget https://grafana.com/api/dashboards/9614/revisions/1/download -O /scripts/dashboard-nginx-ingress.json + wget https://raw.githubusercontent.com/ONLYOFFICE/Kubernetes-Docs/master/sources/metrics/documentserver-statsd-exporter.json -O /scripts/dashboard-documentserver.json + wget 
https://raw.githubusercontent.com/ONLYOFFICE/Kubernetes-Docs/master/sources/metrics/kubernetes-cluster-resourses.json -O /scripts/dashboard-cluster-resourses.json + sed -i 's/${DS_PROMETHEUS}/Prometheus/g' /scripts/*.json + sed -i 's/$DS_PROMETHEUS/Prometheus/g' /scripts/*.json + for i in "${CM[@]}"; do + kubectl get cm ${i} + if [[ "$?" -ne 0 ]]; then + kubectl create configmap ${i} --from-file=/scripts/${i}.json + else + echo -e "\e[0;32m ConfigMap ${i} already exists. Skipping ... \e[0m" + fi + done +{{- end }} diff --git a/templates/configmaps/documentserver.yaml b/templates/configmaps/documentserver.yaml new file mode 100644 index 0000000..9da380c --- /dev/null +++ b/templates/configmaps/documentserver.yaml @@ -0,0 +1,71 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: documentserver + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + DB_TYPE: "postgres" + DB_USER: "postgres" + DB_HOST: "localhost" + DB_PORT: "5432" + DB_NAME: "postgres" + REDIS_CONNECTOR_NAME: {{ .Values.connections.redisConnectorName }} + REDIS_SERVER_HOST: {{ include "ds.redis.subchart.prefix" . 
}}{{ .Values.connections.redisHost }} + REDIS_SERVER_PORT: {{ .Values.connections.redisPort | quote }} + REDIS_SERVER_USER: {{ .Values.connections.redisUser }} + REDIS_SERVER_DB_NUM: {{ .Values.connections.redisDBNum | quote }} + {{- if.Values.connections.redisClusterNodes }} + REDIS_CLUSTER_NODES: {{ join " " .Values.connections.redisClusterNodes }} + {{- end }} + {{- if eq .Values.connections.redisConnectorName "ioredis" }} + REDIS_SENTINEL_GROUP_NAME: {{ .Values.connections.redisSentinelGroupName }} + {{- end }} + AMQP_TYPE: "rabbitmq" + AMQP_PORT: "5672" + AMQP_VHOST: "/" + AMQP_HOST: "localhost" + AMQP_USER: "guest" + AMQP_PROTO: "amqp" + METRICS_ENABLED: {{ .Values.metrics.enabled | quote }} + {{- if .Values.metrics.enabled }} + METRICS_HOST: {{ .Values.metrics.host }} + METRICS_PORT: {{ .Values.metrics.port | quote }} + METRICS_PREFIX: {{ .Values.metrics.prefix }} + {{- end }} + LOG_LEVEL: {{ .Values.log.level }} + LOG_TYPE: {{ .Values.log.type }} + LOG_PATTERN: {{ .Values.log.pattern | quote }} + NGINX_ACCESS_LOG: {{ .Values.documentserver.proxy.accessLog | quote }} + NGINX_GZIP_PROXIED: {{ .Values.documentserver.proxy.gzipProxied | quote }} + NGINX_WORKER_CONNECTIONS: {{ .Values.documentserver.proxy.workerConnections | quote }} + NGINX_WORKER_PROCESSES: {{ .Values.documentserver.proxy.workerProcesses | quote }} + SECURE_LINK_SECRET: {{ .Values.documentserver.proxy.secureLinkSecret | quote }} + {{- if .Values.example.enabled }} + EXAMPLE_HOST_PORT: example:3000 + {{- end }} + WOPI_ENABLED: {{ .Values.wopi.enabled | quote }} + {{- if .Values.webProxy.enabled }} + http_proxy: {{ .Values.webProxy.http | quote }} + https_proxy: {{ .Values.webProxy.https | quote }} + no_proxy: {{ .Values.webProxy.noProxy | quote }} + {{- end }} + {{- if typeIs "bool" .Values.requestFilteringAgent.allowPrivateIPAddress }} + ALLOW_PRIVATE_IP_ADDRESS: {{ .Values.requestFilteringAgent.allowPrivateIPAddress | quote }} + {{- end }} + {{- if typeIs "bool" 
.Values.requestFilteringAgent.allowMetaIPAddress }} + ALLOW_META_IP_ADDRESS: {{ .Values.requestFilteringAgent.allowMetaIPAddress | quote }} + {{- end }} + {{- if .Values.requestFilteringAgent.allowIPAddressList }} + ALLOW_IP_ADDRESS_LIST: {{ toJson .Values.requestFilteringAgent.allowIPAddressList | squote }} + {{- end }} + {{- if .Values.requestFilteringAgent.denyIPAddressList }} + DENY_IP_ADDRESS_LIST: {{ toJson .Values.requestFilteringAgent.denyIPAddressList | squote }} + {{- end }} + STORAGE_SUBDIRECTORY_NAME: {{ .Values.documentserver.docservice.image.tag }} diff --git a/templates/configmaps/example.yaml b/templates/configmaps/example.yaml new file mode 100644 index 0000000..79f39b0 --- /dev/null +++ b/templates/configmaps/example.yaml @@ -0,0 +1,16 @@ +{{- if .Values.example.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: example + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + DS_URL: {{ include "ds.example.dsUrl" . }} +{{- end }} diff --git a/templates/configmaps/grafana.yaml b/templates/configmaps/grafana.yaml new file mode 100644 index 0000000..2dc393e --- /dev/null +++ b/templates/configmaps/grafana.yaml @@ -0,0 +1,19 @@ +{{- if .Values.grafana.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-ini + namespace: {{ include "ds.grafana.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + grafana.ini: |- + [server] + root_url = %(protocol)s://%(domain)s:%(http_port)s/grafana/ + serve_from_sub_path = true +{{- end }} diff --git a/templates/configmaps/remove-shardkey.yaml b/templates/configmaps/remove-shardkey.yaml new file mode 100644 index 0000000..16b44bc --- /dev/null +++ b/templates/configmaps/remove-shardkey.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: remove-shardkey + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{ (.Files.Glob "sources/scripts/remove_shardkey.py").AsConfig | indent 2 }} diff --git a/templates/configmaps/welcome-page.yaml b/templates/configmaps/welcome-page.yaml new file mode 100644 index 0000000..75c8e49 --- /dev/null +++ b/templates/configmaps/welcome-page.yaml @@ -0,0 +1,23 @@ +{{- if not .Values.documentserver.proxy.welcomePage.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: ds-welcome-page + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + ds-example.conf: |- + location /example/ { + proxy_pass http://example/; + proxy_set_header X-Forwarded-Host $the_host; + proxy_set_header X-Forwarded-Proto $the_scheme; + proxy_set_header X-Forwarded-Path /example; + proxy_set_header X-Forwarded-For example.com; + } +{{- end }} diff --git a/templates/deployments/documentserver.yaml b/templates/deployments/documentserver.yaml new file mode 100644 index 0000000..13c7bb5 --- /dev/null +++ b/templates/deployments/documentserver.yaml @@ -0,0 +1,402 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: documentserver + namespace: {{ include "ds.namespace" . | quote }} + labels: + app: documentserver + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.documentserver.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.documentserver.annotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.documentserver.autoscaling.enabled }} + replicas: {{ .Values.documentserver.replicas }} + {{- end }} + selector: + matchLabels: + app: documentserver + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 6 }} + {{- end }} + {{- if .Values.documentserver.updateStrategy }} + strategy: {{- toYaml .Values.documentserver.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + labels: + app: documentserver + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . 
| trim | nindent 8 }} + {{- end }} + {{- if .Values.documentserver.podAnnotations }} + annotations: + {{- range $key, $value := .Values.documentserver.podAnnotations }} + {{ $key }}: {{ tpl $value $ }} + {{- end }} + {{- end }} + spec: + serviceAccountName: {{ include "ds.serviceAccountName" . }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + affinity: + podAntiAffinity: + {{- if eq .Values.podAntiAffinity.type "soft" }} + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - documentserver + topologyKey: {{ .Values.podAntiAffinity.topologyKey }} + weight: {{ .Values.podAntiAffinity.weight }} + {{- else if eq .Values.podAntiAffinity.type "hard" }} + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - documentserver + topologyKey: {{ .Values.podAntiAffinity.topologyKey }} + {{- end }} + {{- with .Values.documentserver.customPodAntiAffinity }} + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.documentserver.podAffinity }} + podAffinity: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.documentserver.nodeAffinity }} + nodeAffinity: + {{- toYaml . 
| nindent 10 }} + {{- end }} + {{- if or .Values.nodeSelector .Values.documentserver.nodeSelector }} + nodeSelector: {{ toYaml (default .Values.nodeSelector .Values.documentserver.nodeSelector) | nindent 8 }} + {{- end }} + {{- if or .Values.tolerations .Values.documentserver.tolerations }} + tolerations: {{ toYaml (default .Values.tolerations .Values.documentserver.tolerations) | nindent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.documentserver.terminationGracePeriodSeconds }} + initContainers: + - name: add-shardkey + image: {{ .Values.documentserver.initContainers.image.repository }}:{{ .Values.documentserver.initContainers.image.tag }} + imagePullPolicy: {{ .Values.documentserver.initContainers.image.pullPolicy }} + {{- if .Values.documentserver.initContainers.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.documentserver.initContainers.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.documentserver.initContainers.resources | nindent 12 }} + env: + - name: REDIS_SERVER_DB_KEYS_NUM + value: {{ .Values.documentserver.keysRedisDBNum | quote }} + - name: REDIS_SERVER_PWD + valueFrom: + secretKeyRef: + name: {{ template "ds.redis.secretName" . }} + key: {{ .Values.connections.redisSecretKeyName }} + - name: DEFAULT_SHARD_KEY + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SHARD_PORT + value: {{ .Values.documentserver.proxy.containerPorts.http | quote }} + envFrom: + - configMapRef: + name: documentserver + command: ['python', '/scripts/add_shardkey.py'] + volumeMounts: + - name: add-shardkey + mountPath: /scripts/add_shardkey.py + subPath: add_shardkey.py + {{- with .Values.documentserver.initContainers.custom }} + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - name: proxy + image: {{ .Values.documentserver.proxy.image.repository }}:{{ .Values.documentserver.proxy.image.tag }} + imagePullPolicy: {{ .Values.documentserver.proxy.image.pullPolicy }} + {{- if .Values.documentserver.proxy.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.documentserver.proxy.containerSecurityContext.enabled "enabled" | toYaml | nindent 12 }} + {{- end }} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "sleep {{ .Values.documentserver.terminationGracePeriodSeconds }}"] + ports: + - containerPort: {{ .Values.documentserver.proxy.containerPorts.http }} + {{- if .Values.documentserver.proxy.startupProbe.enabled }} + startupProbe: {{- omit .Values.documentserver.proxy.startupProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.documentserver.proxy.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.documentserver.proxy.readinessProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.documentserver.proxy.livenessProbe.enabled }} + livenessProbe: {{- omit .Values.documentserver.proxy.livenessProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + resources: {{ toYaml .Values.documentserver.proxy.resources | nindent 12 }} + {{- if or .Values.documentserver.proxy.infoAllowedIP .Values.documentserver.proxy.infoAllowedUser }} + env: + - name: DEFAULT_SHARD_KEY + valueFrom: + fieldRef: + fieldPath: metadata.name + {{- if .Values.documentserver.proxy.infoAllowedIP }} + - name: INFO_ALLOWED_IP + value: {{ join " " .Values.documentserver.proxy.infoAllowedIP }} + {{- end }} + {{- if .Values.documentserver.proxy.infoAllowedUser }} + - name: INFO_ALLOWED_USER + value: {{ .Values.documentserver.proxy.infoAllowedUser }} + - name: INFO_ALLOWED_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "ds.info.secretName" . 
}} + key: {{ .Values.documentserver.proxy.infoAllowedSecretKeyName }} + {{- end }} + {{- end }} + envFrom: + - configMapRef: + name: documentserver + volumeMounts: + - name: ds-files + mountPath: /var/lib/{{ .Values.product.name }}/documentserver/App_Data/cache/files/{{ .Values.documentserver.docservice.image.tag }} + - name: ds-service-files + mountPath: /var/lib/{{ .Values.product.name }}/documentserver/App_Data/cache/files + {{- if .Values.extraThemes.configMap }} + - name: custom-themes + mountPath: /var/www/{{ .Values.product.name }}/documentserver/web-apps/apps/common/main/resources/themes/{{ .Values.extraThemes.filename }} + subPath: {{ .Values.extraThemes.filename }} + {{- end }} + {{- if not .Values.documentserver.proxy.welcomePage.enabled }} + - name: ds-welcome-page + mountPath: /etc/nginx/includes/ds-example.conf + subPath: ds-example.conf + {{- end }} + + - name: docservice + image: {{ .Values.documentserver.docservice.image.repository }}:{{ .Values.documentserver.docservice.image.tag }} + imagePullPolicy: {{ .Values.documentserver.docservice.image.pullPolicy }} + {{- if .Values.documentserver.docservice.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.documentserver.docservice.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + lifecycle: + preStop: + exec: + command: ["python", "/var/lib/{{ .Values.product.name }}/documentserver/scripts/remove_shardkey.py"] + ports: + - containerPort: {{ .Values.documentserver.docservice.containerPorts.http }} + {{- if .Values.documentserver.docservice.startupProbe.enabled }} + startupProbe: {{- omit .Values.documentserver.docservice.startupProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.documentserver.docservice.readinessProbe.enabled }} + readinessProbe: {{- omit .Values.documentserver.docservice.readinessProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.documentserver.docservice.livenessProbe.enabled }} + livenessProbe: {{- 
omit .Values.documentserver.docservice.livenessProbe "enabled" | toYaml | nindent 12 }} + {{- end }} + resources: {{ toYaml .Values.documentserver.docservice.resources | nindent 12 }} + env: + - name: DEFAULT_SHARD_KEY + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: SHARD_PORT + value: {{ .Values.documentserver.proxy.containerPorts.http | quote }} + - name: REDIS_SERVER_DB_KEYS_NUM + value: {{ .Values.documentserver.keysRedisDBNum | quote }} + - name: REDIS_SERVER_PWD + valueFrom: + secretKeyRef: + name: {{ template "ds.redis.secretName" . }} + key: {{ .Values.connections.redisSecretKeyName }} + envFrom: + - secretRef: + name: {{ template "ds.jwt.secretName" . }} + - configMapRef: + name: documentserver + volumeMounts: + - name: ds-files + mountPath: /var/lib/{{ .Values.product.name }}/documentserver/App_Data/cache/files/{{ .Values.documentserver.docservice.image.tag }} + - name: ds-service-files + mountPath: /var/lib/{{ .Values.product.name }}/documentserver/App_Data/cache/files + - name: config-file + mountPath: /etc/{{ .Values.product.name }}/documentserver/local.json + subPath: local.json + - name: ds-license + mountPath: /var/www/{{ .Values.product.name }}/Data + {{- if not .Values.license.existingClaim }} + readOnly: true + {{- end }} + {{- if .Values.extraConf.configMap }} + - name: custom-file + mountPath: /etc/{{ .Values.product.name }}/documentserver/{{ .Values.extraConf.filename }} + subPath: {{ .Values.extraConf.filename }} + {{- end }} + {{- if .Values.extraThemes.configMap }} + - name: custom-themes + mountPath: /var/www/{{ .Values.product.name }}/documentserver/web-apps/apps/common/main/resources/themes/{{ .Values.extraThemes.filename }} + subPath: {{ .Values.extraThemes.filename }} + {{- end }} + - name: remove-shardkey + mountPath: /var/lib/{{ .Values.product.name }}/documentserver/scripts/remove_shardkey.py + subPath: remove_shardkey.py + + {{- $context := . 
}} + {{- $converterCount := (toString .Values.documentserver.converter.count) }} + {{- range $i := until (atoi $converterCount) }} + - name: converter-{{ $i }} + image: {{ $context.Values.documentserver.converter.image.repository }}:{{ $context.Values.documentserver.converter.image.tag }} + imagePullPolicy: {{ $context.Values.documentserver.converter.image.pullPolicy }} + {{- if $context.Values.documentserver.converter.containerSecurityContext.enabled }} + securityContext: {{- omit $context.Values.documentserver.converter.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml $context.Values.documentserver.converter.resources | nindent 12 }} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "sleep {{ $context.Values.documentserver.terminationGracePeriodSeconds }}"] + env: + - name: DEFAULT_SHARD_KEY + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: REDIS_SERVER_PWD + valueFrom: + secretKeyRef: + name: {{ template "ds.redis.secretName" $context }} + key: {{ $context.Values.connections.redisSecretKeyName }} + envFrom: + - secretRef: + name: {{ template "ds.jwt.secretName" $context }} + - configMapRef: + name: documentserver + volumeMounts: + - name: ds-files + mountPath: /var/lib/{{ $context.Values.product.name }}/documentserver/App_Data/cache/files/{{ $context.Values.documentserver.docservice.image.tag }} + - name: ds-service-files + mountPath: /var/lib/{{ $context.Values.product.name }}/documentserver/App_Data/cache/files + - name: config-file + mountPath: /etc/{{ $context.Values.product.name }}/documentserver/local.json + subPath: local.json + - name: ds-license + mountPath: /var/www/{{ $context.Values.product.name }}/Data + {{- if not $context.Values.license.existingClaim }} + readOnly: true + {{- end }} + {{- if $context.Values.extraConf.configMap }} + - name: custom-file + mountPath: /etc/{{ $context.Values.product.name }}/documentserver/{{ $context.Values.extraConf.filename }} + subPath: {{ 
$context.Values.extraConf.filename }} + {{- end }} + {{- if $context.Values.extraThemes.configMap }} + - name: custom-themes + mountPath: /var/www/{{ $context.Values.product.name }}/documentserver/web-apps/apps/common/main/resources/themes/{{ $context.Values.extraThemes.filename }} + subPath: {{ $context.Values.extraThemes.filename }} + {{- end }} + {{- end }} + + - name: postgresql + image: {{ .Values.documentserver.postgresql.image.repository }}:{{ .Values.documentserver.postgresql.image.tag }} + imagePullPolicy: {{ .Values.documentserver.postgresql.image.pullPolicy }} + {{- if .Values.documentserver.postgresql.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.documentserver.postgresql.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.documentserver.postgresql.resources | nindent 12 }} + lifecycle: + preStop: + exec: + command: ["/bin/sh", "-c", "sleep {{ .Values.documentserver.terminationGracePeriodSeconds }}"] + ports: + - containerPort: {{ .Values.documentserver.postgresql.containerPorts.tcp }} + env: + - name: POSTGRES_DB + valueFrom: + configMapKeyRef: + name: documentserver + key: DB_NAME + - name: POSTGRES_USER + valueFrom: + configMapKeyRef: + name: documentserver + key: DB_USER + - name: POSTGRES_HOST_AUTH_METHOD + value: trust + volumeMounts: + - name: createdb + mountPath: /docker-entrypoint-initdb.d/createdb.sql + subPath: createdb.sql + + - name: rabbitmq + image: {{ .Values.documentserver.rabbitmq.image.repository }}:{{ .Values.documentserver.rabbitmq.image.tag }} + imagePullPolicy: {{ .Values.documentserver.rabbitmq.image.pullPolicy }} + {{- if .Values.documentserver.rabbitmq.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.documentserver.rabbitmq.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.documentserver.rabbitmq.resources | nindent 12 }} + lifecycle: + preStop: + exec: + command: 
["/bin/sh", "-c", "sleep {{ .Values.documentserver.terminationGracePeriodSeconds }}"] + ports: + - containerPort: {{ .Values.documentserver.rabbitmq.containerPorts.amqp }} + volumes: + - name: ds-files + emptyDir: {} + - name: ds-service-files + persistentVolumeClaim: + claimName: {{ template "ds.pvc.name" . }} + - name: config-file + configMap: + name: local-config + - name: ds-license + {{- if .Values.license.existingClaim }} + persistentVolumeClaim: + claimName: {{ .Values.license.existingClaim }} + {{- else }} + secret: + secretName: {{ template "ds.license.secretName" . }} + {{- end }} + {{- if .Values.extraConf.configMap }} + - name: custom-file + configMap: + name: {{ .Values.extraConf.configMap }} + {{- end }} + {{- if .Values.extraThemes.configMap }} + - name: custom-themes + configMap: + name: {{ .Values.extraThemes.configMap }} + {{- end }} + {{- if not .Values.documentserver.proxy.welcomePage.enabled }} + - name: ds-welcome-page + configMap: + name: ds-welcome-page + {{- end }} + - name: createdb + configMap: + name: createdb + - name: add-shardkey + configMap: + name: add-shardkey + defaultMode: 0755 + - name: remove-shardkey + configMap: + name: remove-shardkey + defaultMode: 0755 diff --git a/templates/hpa/documentserver.yaml b/templates/hpa/documentserver.yaml new file mode 100644 index 0000000..8f438a9 --- /dev/null +++ b/templates/hpa/documentserver.yaml @@ -0,0 +1,46 @@ +{{- if .Values.documentserver.autoscaling.enabled }} +apiVersion: {{ .Capabilities.APIVersions.Has "autoscaling/v2" | ternary "autoscaling/v2" "autoscaling/v2beta2" }} +kind: HorizontalPodAutoscaler +metadata: + name: documentserver-hpa + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.documentserver.autoscaling.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.documentserver.autoscaling.annotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: documentserver + minReplicas: {{ .Values.documentserver.autoscaling.minReplicas }} + maxReplicas: {{ .Values.documentserver.autoscaling.maxReplicas }} + metrics: + {{- if .Values.documentserver.autoscaling.targetCPU.enabled }} + - type: Resource + resource: + name: cpu + target: + type: Utilization + averageUtilization: {{ .Values.documentserver.autoscaling.targetCPU.utilizationPercentage }} + {{- end }} + {{- if .Values.documentserver.autoscaling.targetMemory.enabled }} + - type: Resource + resource: + name: memory + target: + type: Utilization + averageUtilization: {{ .Values.documentserver.autoscaling.targetMemory.utilizationPercentage }} + {{- end }} + {{- with .Values.documentserver.autoscaling.customMetricsType }} + {{- toYaml . | nindent 2 }} + {{- end }} + {{- with .Values.documentserver.autoscaling.behavior }} + behavior: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/templates/ingresses/documentserver.yaml b/templates/ingresses/documentserver.yaml new file mode 100644 index 0000000..b779294 --- /dev/null +++ b/templates/ingresses/documentserver.yaml @@ -0,0 +1,48 @@ +{{- if .Values.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: documentserver + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + annotations: + nginx.ingress.kubernetes.io/configuration-snippet: | + set $custom_endpoint ''; + include /etc/nginx/custom_balancer.conf; + {{- if or .Values.commonAnnotations .Values.ingress.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.ingress.annotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} + {{- if not (eq .Values.ingress.path "/") }} + nginx.ingress.kubernetes.io/x-forwarded-prefix: {{ .Values.ingress.path }} + nginx.ingress.kubernetes.io/rewrite-target: /$2 + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end }} + {{- if .Values.ingress.ssl.enabled }} + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: {{ .Values.ingress.ssl.secret }} + {{- end }} + rules: + {{- if .Values.ingress.host }} + - host: {{ .Values.ingress.host }} + {{- else }} + - host: + {{- end }} + http: + paths: + - path: {{ template "ds.ingress.path" . }} + pathType: Prefix + backend: + service: + name: {{ template "ds.svc.name" . }} + port: + number: {{ .Values.service.port }} +{{- end }} diff --git a/templates/ingresses/grafana.yaml b/templates/ingresses/grafana.yaml new file mode 100644 index 0000000..a5e0889 --- /dev/null +++ b/templates/ingresses/grafana.yaml @@ -0,0 +1,40 @@ +{{- if and .Values.grafana.enabled .Values.grafana.ingress.enabled }} +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: grafana + namespace: {{ include "ds.grafana.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.grafana.ingress.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.grafana.ingress.annotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end }} + {{- if .Values.ingress.ssl.enabled }} + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: {{ .Values.ingress.ssl.secret }} + {{- end }} + rules: + {{- if .Values.ingress.host }} + - host: {{ .Values.ingress.host }} + {{- else }} + - host: + {{- end }} + http: + paths: + - path: /grafana/ + pathType: Prefix + backend: + service: + name: grafana + port: + number: 80 +{{- end }} diff --git a/templates/jobs/dashboard.yaml b/templates/jobs/dashboard.yaml new file mode 100644 index 0000000..55ed859 --- /dev/null +++ b/templates/jobs/dashboard.yaml @@ -0,0 +1,86 @@ +{{- if and .Values.grafana.enabled .Values.grafana.dashboard.enabled }} +apiVersion: batch/v1 +kind: Job +metadata: + name: grafana-dashboard + namespace: {{ include "ds.grafana.namespace" . | quote }} + labels: + app.kubernetes.io/component: grafana-dashboard + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- if or .Values.commonAnnotations .Values.grafanaDashboard.job.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.grafanaDashboard.job.annotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + backoffLimit: 2 + template: + metadata: + labels: + app.kubernetes.io/component: grafana-dashboard + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 8 }} + {{- end }} + {{- if .Values.grafanaDashboard.job.podAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.grafanaDashboard.job.podAnnotations "context" $ ) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: dashboard-sa + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if or .Values.grafanaDashboard.job.customPodAntiAffinity .Values.grafanaDashboard.job.podAffinity .Values.grafanaDashboard.job.nodeAffinity }} + affinity: + {{- with .Values.grafanaDashboard.job.customPodAntiAffinity }} + podAntiAffinity: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.grafanaDashboard.job.podAffinity }} + podAffinity: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.grafanaDashboard.job.nodeAffinity }} + nodeAffinity: + {{- toYaml . 
| nindent 10 }} + {{- end }} + {{- end }} + {{- if or .Values.nodeSelector .Values.grafanaDashboard.job.nodeSelector }} + nodeSelector: {{ toYaml (default .Values.nodeSelector .Values.grafanaDashboard.job.nodeSelector) | nindent 8 }} + {{- end }} + {{- if or .Values.tolerations .Values.grafanaDashboard.job.tolerations }} + tolerations: {{ toYaml (default .Values.tolerations .Values.grafanaDashboard.job.tolerations) | nindent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + volumes: + - name: grafana-dashboard + configMap: + name: grafana-dashboard + defaultMode: 0755 + containers: + - name: grafana-dashboard + image: {{ .Values.grafanaDashboard.job.image.repository }}:{{ .Values.grafanaDashboard.job.image.tag }} + imagePullPolicy: {{ .Values.grafanaDashboard.job.image.pullPolicy }} + {{- if .Values.grafanaDashboard.job.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.grafanaDashboard.job.containerSecurityContext "enabled" | toYaml | nindent 10 }} + {{- end }} + resources: {{- toYaml .Values.grafanaDashboard.job.resources | nindent 10 }} + command: ["/bin/sh", "-c"] + {{- if .Values.webProxy.enabled }} + args: ["http_proxy={{ .Values.webProxy.http }} https_proxy={{ .Values.webProxy.https }} no_proxy={{ .Values.webProxy.noProxy }} /scripts/get_dashboard.sh"] + {{- else }} + args: ["/scripts/get_dashboard.sh"] + {{- end }} + volumeMounts: + - name: grafana-dashboard + mountPath: /scripts/get_dashboard.sh + subPath: get_dashboard.sh + restartPolicy: Never +{{- end }} diff --git a/templates/pvc/ds-service-files.yaml b/templates/pvc/ds-service-files.yaml new file mode 100644 index 0000000..6dde1f9 --- /dev/null +++ b/templates/pvc/ds-service-files.yaml @@ -0,0 +1,23 @@ +{{- if eq (include "ds.pvc.create" .) "true" }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: ds-service-files + namespace: {{ include "ds.namespace" . 
| quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.persistence.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.persistence.annotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + storageClassName: {{ .Values.persistence.storageClass }} + accessModes: + - ReadWriteMany + volumeMode: Filesystem + resources: + requests: + storage: {{ .Values.persistence.size }} +{{- end }} diff --git a/templates/secrets/grafana-datasource.yaml b/templates/secrets/grafana-datasource.yaml new file mode 100644 index 0000000..5d35c70 --- /dev/null +++ b/templates/secrets/grafana-datasource.yaml @@ -0,0 +1,23 @@ +{{- if .Values.grafana.enabled }} +apiVersion: v1 +kind: Secret +metadata: + name: grafana-datasource + namespace: {{ include "ds.grafana.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +stringData: + prometheus.yaml: | + apiVersion: 1 + datasources: + - name: Prometheus + type: prometheus + url: http://prometheus-server + editable: true +{{- end }} diff --git a/templates/secrets/info-auth.yaml b/templates/secrets/info-auth.yaml new file mode 100644 index 0000000..2f608f0 --- /dev/null +++ b/templates/secrets/info-auth.yaml @@ -0,0 +1,17 @@ +{{- if eq (include "ds.info.createSecret" .) "true" }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-info-auth + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +stringData: + {{ .Values.documentserver.proxy.infoAllowedSecretKeyName }}: {{ include "ds.info.password" . | quote }} +{{- end }} diff --git a/templates/secrets/jwt.yaml b/templates/secrets/jwt.yaml new file mode 100644 index 0000000..cc726c2 --- /dev/null +++ b/templates/secrets/jwt.yaml @@ -0,0 +1,38 @@ +{{- if eq (include "ds.jwt.createSecret" .) "true" }} +apiVersion: v1 +kind: Secret +metadata: + name: jwt + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +stringData: + {{- if typeIs "bool" .Values.jwt.enabled }} + JWT_ENABLED: {{ .Values.jwt.enabled | quote }} + {{- if .Values.jwt.enabled }} + JWT_SECRET: {{ .Values.jwt.secret | quote }} + JWT_HEADER: {{ .Values.jwt.header | quote }} + JWT_IN_BODY: {{ .Values.jwt.inBody | quote }} + {{- end }} + {{- end }} + {{- if typeIs "bool" .Values.jwt.inbox.enabled }} + JWT_ENABLED_INBOX: {{ .Values.jwt.inbox.enabled | quote }} + {{- if .Values.jwt.inbox.enabled }} + JWT_SECRET_INBOX: {{ .Values.jwt.inbox.secret | quote }} + JWT_HEADER_INBOX: {{ .Values.jwt.inbox.header | quote }} + {{- end }} + {{- end }} + {{- if typeIs "bool" .Values.jwt.outbox.enabled }} + JWT_ENABLED_OUTBOX: {{ .Values.jwt.outbox.enabled | quote }} + {{- if .Values.jwt.outbox.enabled }} + JWT_SECRET_OUTBOX: {{ .Values.jwt.outbox.secret | quote }} + JWT_HEADER_OUTBOX: {{ .Values.jwt.outbox.header | quote }} + {{- end }} + {{- end }} +{{- end }} diff --git a/templates/secrets/license.yaml 
b/templates/secrets/license.yaml new file mode 100644 index 0000000..4329573 --- /dev/null +++ b/templates/secrets/license.yaml @@ -0,0 +1,15 @@ +{{- if eq (include "ds.license.createSecret" .) "true" }} +apiVersion: v1 +kind: Secret +metadata: + name: license + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +{{- end }} diff --git a/templates/secrets/redis-password.yaml b/templates/secrets/redis-password.yaml new file mode 100644 index 0000000..e6f22dc --- /dev/null +++ b/templates/secrets/redis-password.yaml @@ -0,0 +1,20 @@ +{{- if eq (include "ds.redis.createSecret" .) "true" }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Release.Name }}-redis + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + annotations: + helm.sh/hook: pre-install + helm.sh/hook-weight: "1" + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +stringData: + {{ .Values.connections.redisSecretKeyName }}: {{ include "ds.redis.password" . | quote }} +{{- end }} diff --git a/templates/serviceaccount/dashboard.yaml b/templates/serviceaccount/dashboard.yaml new file mode 100644 index 0000000..a1ac562 --- /dev/null +++ b/templates/serviceaccount/dashboard.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.grafana.enabled .Values.grafana.dashboard.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: dashboard-sa + namespace: {{ include "ds.grafana.namespace" . 
| quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + {{- if .Values.commonAnnotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: true +{{- end }} diff --git a/templates/serviceaccount/documentserver.yaml b/templates/serviceaccount/documentserver.yaml new file mode 100644 index 0000000..c9c17b5 --- /dev/null +++ b/templates/serviceaccount/documentserver.yaml @@ -0,0 +1,19 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ds.serviceAccountName" . }} + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-weight": "3" + {{- if or .Values.commonAnnotations .Values.serviceAccount.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.serviceAccount.annotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/templates/services/docservice.yaml b/templates/services/docservice.yaml new file mode 100644 index 0000000..3ddfaac --- /dev/null +++ b/templates/services/docservice.yaml @@ -0,0 +1,24 @@ +kind: Service +apiVersion: v1 +metadata: + name: docservice + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + app: documentserver + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + ports: + - name: http + protocol: TCP + port: {{ .Values.documentserver.docservice.containerPorts.http }} + targetPort: {{ .Values.documentserver.docservice.containerPorts.http }} + type: ClusterIP diff --git a/templates/services/documentserver.yaml b/templates/services/documentserver.yaml new file mode 100644 index 0000000..ec793ba --- /dev/null +++ b/templates/services/documentserver.yaml @@ -0,0 +1,36 @@ +{{- if eq (include "ds.svc.create" .) "true" }} +kind: Service +apiVersion: v1 +metadata: + name: documentserver + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.service.annotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + app: documentserver + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + ports: + - name: http + protocol: TCP + port: {{ .Values.service.port }} + targetPort: {{ .Values.documentserver.proxy.containerPorts.http }} + type: {{ .Values.service.type }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- if .Values.service.sessionAffinityConfig }} + {{- with .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: + {{- toYaml . 
| nindent 4 }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/templates/services/example.yaml b/templates/services/example.yaml new file mode 100644 index 0000000..e51af0c --- /dev/null +++ b/templates/services/example.yaml @@ -0,0 +1,27 @@ +{{- if .Values.example.enabled }} +kind: Service +apiVersion: v1 +metadata: + name: example + namespace: {{ include "ds.namespace" . | quote }} + labels: + app: example + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + ports: + - name: http + protocol: TCP + port: {{ .Values.example.containerPorts.http }} + targetPort: {{ .Values.example.containerPorts.http }} + selector: + statefulset.kubernetes.io/pod-name: example-0 + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + type: ClusterIP +{{- end }} diff --git a/templates/statefulset/example.yaml b/templates/statefulset/example.yaml new file mode 100644 index 0000000..c6747ed --- /dev/null +++ b/templates/statefulset/example.yaml @@ -0,0 +1,104 @@ +{{- if .Values.example.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: example + namespace: {{ include "ds.namespace" . | quote }} + labels: + app: example + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.example.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.example.annotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app: example + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . 
| trim | nindent 6 }} + {{- end }} + serviceName: example + replicas: 1 + {{- if .Values.example.updateStrategy }} + updateStrategy: {{- toYaml .Values.example.updateStrategy | nindent 4 }} + {{- end }} + podManagementPolicy: OrderedReady + template: + metadata: + labels: + app: example + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . | trim | nindent 8 }} + {{- end }} + {{- if .Values.example.podAnnotations }} + annotations: + {{- range $key, $value := .Values.example.podAnnotations }} + {{ $key }}: {{ tpl $value $ }} + {{- end }} + {{- end }} + spec: + serviceAccountName: {{ include "ds.serviceAccountName" . }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: + fsGroup: 1001 + {{- end }} + {{- if or .Values.example.customPodAntiAffinity .Values.example.podAffinity .Values.example.nodeAffinity }} + affinity: + {{- with .Values.example.customPodAntiAffinity }} + podAntiAffinity: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.example.podAffinity }} + podAffinity: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- with .Values.example.nodeAffinity }} + nodeAffinity: + {{- toYaml . 
| nindent 10 }} + {{- end }} + {{- end }} + {{- if or .Values.nodeSelector .Values.example.nodeSelector }} + nodeSelector: {{ toYaml (default .Values.nodeSelector .Values.example.nodeSelector) | nindent 8 }} + {{- end }} + {{- if or .Values.tolerations .Values.example.tolerations }} + tolerations: {{ toYaml (default .Values.tolerations .Values.example.tolerations) | nindent 8 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + containers: + - name: example + image: {{ .Values.example.image.repository }}:{{ .Values.example.image.tag }} + imagePullPolicy: {{ .Values.example.image.pullPolicy }} + {{- if .Values.example.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.example.containerSecurityContext "enabled" | toYaml | nindent 10 }} + {{- end }} + {{- if .Values.example.lifecycleHooks }} + lifecycle: {{- toYaml .Values.example.lifecycleHooks | nindent 10 }} + {{- end }} + ports: + - containerPort: {{ .Values.example.containerPorts.http }} + name: http + resources: {{ toYaml .Values.example.resources | nindent 10 }} + envFrom: + - secretRef: + name: {{ template "ds.jwt.secretName" . 
}} + - configMapRef: + name: example + {{- if .Values.example.extraConf.configMap }} + volumeMounts: + - name: example-custom-file + mountPath: /etc/{{ .Values.product.name }}/documentserver-example/{{ .Values.example.extraConf.filename }} + subPath: {{ .Values.example.extraConf.filename }} + {{- end }} + {{- if .Values.example.extraConf.configMap }} + volumes: + - name: example-custom-file + configMap: + name: {{ .Values.example.extraConf.configMap }} + {{- end }} +{{- end }} diff --git a/templates/tests/test-ds-cm.yaml b/templates/tests/test-ds-cm.yaml new file mode 100644 index 0000000..e423f4c --- /dev/null +++ b/templates/tests/test-ds-cm.yaml @@ -0,0 +1,16 @@ +{{- if .Values.tests.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-ds + namespace: {{ include "ds.namespace" . | quote }} + {{- if .Values.commonLabels }} + labels: + {{- include "ds.labels.commonLabels" . | trim | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "ds.annotations.commonAnnotations" ( dict "keyName" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{ (.Files.Glob "sources/scripts/test_ds.py").AsConfig | indent 2 }} +{{- end }} diff --git a/templates/tests/test-ds-pod.yaml b/templates/tests/test-ds-pod.yaml new file mode 100644 index 0000000..261d78c --- /dev/null +++ b/templates/tests/test-ds-pod.yaml @@ -0,0 +1,82 @@ +{{- if .Values.tests.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: test-ds + namespace: {{ include "ds.namespace" . | quote }} + labels: + app.kubernetes.io/component: test-ds + {{- if .Values.commonLabels }} + {{- include "ds.labels.commonLabels" . 
| trim | nindent 4 }} + {{- end }} + annotations: + "helm.sh/hook": test + "helm.sh/hook-weight": "1" + "helm.sh/hook-delete-policy": before-hook-creation + {{- if or .Values.commonAnnotations .Values.tests.annotations }} + {{- $dictValue := default .Values.commonAnnotations .Values.tests.annotations }} + {{- include "ds.annotations.commonAnnotations" ( dict "keyName" $dictValue "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceAccountName: {{ include "ds.serviceAccountName" . }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 4 }} + {{- end }} + {{- if or .Values.tests.customPodAntiAffinity .Values.tests.podAffinity .Values.tests.nodeAffinity }} + affinity: + {{- with .Values.tests.customPodAntiAffinity }} + podAntiAffinity: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.tests.podAffinity }} + podAffinity: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.tests.nodeAffinity }} + nodeAffinity: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- end }} + {{- if or .Values.nodeSelector .Values.tests.nodeSelector }} + nodeSelector: {{ toYaml (default .Values.nodeSelector .Values.tests.nodeSelector) | nindent 4 }} + {{- end }} + {{- if or .Values.tolerations .Values.tests.tolerations }} + tolerations: {{ toYaml (default .Values.tolerations .Values.tests.tolerations) | nindent 4 }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: + - name: {{ .Values.imagePullSecrets }} + {{- end }} + containers: + - name: test-ds + image: {{ .Values.tests.image.repository }}:{{ .Values.tests.image.tag }} + imagePullPolicy: {{ .Values.tests.image.pullPolicy }} + {{- if .Values.tests.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.tests.containerSecurityContext "enabled" | toYaml | nindent 6 }} + {{- end }} + resources: {{ toYaml .Values.tests.resources | nindent 6 }} + env: + - name: REDIS_SERVER_PWD + valueFrom: + secretKeyRef: + name: {{ template "ds.redis.secretName" . 
}} + key: {{ .Values.connections.redisSecretKeyName }} + envFrom: + - configMapRef: + name: documentserver + volumeMounts: + - name: test-ds + mountPath: /scripts/test_ds.py + subPath: test_ds.py + {{- if .Values.webProxy.enabled }} + command: ['/bin/sh', '-c', 'http_proxy={{ .Values.webProxy.http }} https_proxy={{ .Values.webProxy.https }} no_proxy={{ .Values.webProxy.noProxy }} python /scripts/test_ds.py'] + {{- else }} + command: ['python', '/scripts/test_ds.py'] + {{- end }} + volumes: + - name: test-ds + configMap: + name: test-ds + defaultMode: 0755 + restartPolicy: Never +{{- end }} diff --git a/values.yaml b/values.yaml new file mode 100644 index 0000000..b00e78f --- /dev/null +++ b/values.yaml @@ -0,0 +1,1205 @@ +## Dependencies charts parameters + +## ingress-nginx.enabled parameter to manage the ingress controller subchart condition +ingress-nginx: + enabled: true + namespaceOverride: default + controller: + replicaCount: 2 + allowSnippetAnnotations: true + extraVolumeMounts: + - name: custom-balancer + mountPath: /etc/nginx/custom_balancer.conf + subPath: custom_balancer.conf + - name: balancer-lua + mountPath: /etc/nginx/lua/balancer.lua + subPath: balancer.lua + extraVolumes: + - name: custom-balancer + configMap: + name: balancer-snippet + - name: balancer-lua + configMap: + name: balancer-lua + service: + annotations: {} + +## redis.enabled parameters to manage the redis subchart condition +redis: + enabled: true + architecture: "standalone" + secretAnnotations: + helm.sh/hook: pre-install + helm.sh/hook-weight: "1" + master: + persistence: + size: 8Gi + storageClass: "" + metrics: + enabled: false + auth: + password: "" + +## Default values for Onlyoffice Docs + +## product.name Specifies name of the product +## This is a service variable.
You don't need to change it +product: + name: onlyoffice + +## Onlyoffice Docs common parameters +## This block defines common parameters for all resources +## +## Connection parameters to external services +connections: + ## connections.redisConnectorName Defines which connector to use to connect to Redis + ## If you need to connect to Redis Sentinel, set the value `ioredis` + redisConnectorName: redis + ## connections.redisHost The IP address or the name of the Redis host + ## If Redis is deployed inside a k8s cluster, then you need to specify the FQDN name of the service + ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#services + ## Not used if values are set in `connections.redisClusterNodes` + redisHost: redis-master.default.svc.cluster.local + ## connections.redisPort The Redis server port number + ## Not used if values are set in `connections.redisClusterNodes` + redisPort: "6379" + ## connections.redisUser The Redis user name + ## ref: https://redis.io/docs/management/security/acl/ + ## The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file + redisUser: default + ## connections.redisDBNum Number of the redis logical database to be selected + ## ref: https://redis.io/commands/select/ + ## The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file + ## Not used if values are set in `connections.redisClusterNodes` + redisDBNum: "0" + ## connections.redisClusterNodes List of nodes in the Redis cluster + ## There is no need to specify every node in the cluster, 3 should be enough + ## You can specify multiple values + ## It must be specified in the `host:port` format + ## Example: + ## redisClusterNodes: + ## - 10.244.0.79:6379 + ## - 192.168.1.25:6379 + redisClusterNodes: [] + ## connections.redisSentinelGroupName Name of a group of Redis instances composed of a master and one or more 
slaves + ## Used if `connections.redisConnectorName` is set to `ioredis` + redisSentinelGroupName: mymaster + ## connections.redisExistingSecret Name of existing secret to use for Redis passwords + ## Must contain the key specified in `connections.redisSecretKeyName` + ## The password from this secret overrides the value for the password set in the `options` object in `local.json` if you add custom configuration file + redisExistingSecret: redis + ## connections.redisSecretKeyName The name of the key that contains the Redis user password + ## If you set a password in `redisPassword`, a secret will be automatically created, the key name of which will be the value set here + redisSecretKeyName: redis-password + ## connections.redisPassword The password set for the Redis account + ## If set to, it takes priority over the `connections.redisExistingSecret` + ## The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file + redisPassword: "" + ## connections.redisNoPass Defines whether to use a Redis auth without a password + ## If the connection to Redis server does not require a password, set the value to `true` + redisNoPass: false +## Web Proxy parameters +## Used if your network has a web proxy +## ref: https://github.com/ONLYOFFICE/Kubernetes-Docs#11-run-jobs-in-a-private-k8s-cluster-optional +webProxy: + ## webProxy.enabled Specify whether a Web proxy is used in your network to access the Pods of k8s cluster to the Internet + enabled: false + ## webProxy.http Web Proxy address for `HTTP` traffic + http: "http://proxy.example.com" + ## webProxy.https Web Proxy address for `HTTPS` traffic + https: "https://proxy.example.com" + ## webProxy.noProxy Patterns for IP addresses or k8s services name or domain names that shouldn’t use the Web Proxy + noProxy: "localhost,127.0.0.1,docservice" +## privateCluster Specify whether the k8s cluster is used in a private network without internet access +## ref: 
https://github.com/ONLYOFFICE/Kubernetes-Docs#11-run-jobs-in-a-private-k8s-cluster-optional +privateCluster: false +## namespaceOverride The name of the namespace in which Onlyoffice Docs will be deployed +## If not set, the name will be taken from .Release.Namespace +namespaceOverride: "" +## commonLabels Defines labels that will be additionally added to all the deployed resources +## You can also use `tpl` as the value for the key +## ref: https://helm.sh/docs/chart_best_practices/labels/ +## Example: +## commonLabels: +## app.kubernetes.io/name: "{{ .Chart.Name }}" +## helm.sh/chart: '{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}' +## app.kubernetes.io/managed-by: "{{ .Release.Service }}" +## app.kubernetes.io/instance: "{{ .Release.Name }}" +## app.kubernetes.io/version: "{{ .Chart.AppVersion }}" +commonLabels: {} +## commonAnnotations Defines annotations that will be additionally added to all the deployed resources +## You can also use `tpl` as the value for the key +## Some resources may override the values specified here with their own +## Example: +## commonAnnotations: +## "key1": "value1" +## "key2": "{{ value2 }}" +commonAnnotations: {} +## Service account parameters +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +serviceAccount: + ## serviceAccount.create Enable ServiceAccount creation + create: false + ## serviceAccount.name Name of the ServiceAccount to be used + ## If not set and `serviceAccount.create` is `true` the name will be taken from .Release.Name + ## If not set and `serviceAccount.create` is `false` the name will be "default" + name: "" + ## serviceAccount.annotations Map of annotations to add to the ServiceAccount + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## serviceAccount.automountServiceAccountToken Enable auto mount of ServiceAccountToken on the serviceAccount created + ## Used only if 
`serviceAccount.create` is `true` + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#opt-out-of-api-credential-automounting + automountServiceAccountToken: true +## Persistence parameters for forgotten and error files +persistence: + ## persistence.existingClaim Name of an existing PVC to use + ## If not specified, a PVC named "ds-service-files" will be created + existingClaim: "" + ## persistence.annotations Defines annotations that will be additionally added to "ds-service-files" PVC + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## persistence.storageClass PVC Storage Class for Onlyoffice Docs service data volume + storageClass: "nfs" + ## persistence.size PVC Storage Request for Onlyoffice Docs volume + size: 8Gi +## Configure a Security Context for a Pod +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +podSecurityContext: + ## podSecurityContext.enabled Enable security context for the pods + ## If set to true, `podSecurityContext` is enabled for all resources describing the podTemplate + enabled: false + ## podSecurityContext.fsGroup Defines the Group ID to which the owner and permissions for all files in volumes are changed when mounted in the Pod + fsGroup: 101 +## Pod anti-affinity parameters +## Pod anti-affinity prohibits at all (required) or, if possible (preferred), placing a second pod with the same label on the same node +podAntiAffinity: + ## podAntiAffinity.type Types of Pod antiaffinity. Allowed values: `soft` or `hard` (soft=preferred, hard=required) + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + type: "soft" + ## podAntiAffinity.topologyKey Node label key to match + topologyKey: kubernetes.io/hostname + ## podAntiAffinity.weight Priority when selecting node. It is in the range from 1 to 100. 
Used only when `podAntiAffinity.type=soft` + weight: "100" +## nodeSelector Node labels for pods assignment +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector +nodeSelector: {} +## tolerations Tolerations for pods assignment +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ +tolerations: [] +## imagePullSecrets Container image registry secret name +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: "" +## Onlyoffice Docs service parameters +service: + ## service.existing The name of an existing service for ONLYOFFICE Docs. If not set, a service named `documentserver` will be created + ## ref: https://github.com/ONLYOFFICE/Kubernetes-Docs/blob/master/templates/services/documentserver.yaml + existing: "" + ## service.annotations Map of annotations to add to the ONLYOFFICE Docs service + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## service.type ONLYOFFICE Docs service type + type: ClusterIP + ## service.port ONLYOFFICE Docs service port + port: 8888 + ## service.sessionAffinity Session Affinity for ONLYOFFICE Docs service + ## If not set, `None` will be set as the default value + ## ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity + sessionAffinity: "" + ## service.sessionAffinityConfig Configuration for ONLYOFFICE Docs service Session Affinity + ## Used if the `service.sessionAffinity` is set + ## ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-stickiness-timeout + ## Example: + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 900 + sessionAffinityConfig: {} +## Onlyoffice Docs license +license: + ## license.existingSecret Name of the existing secret that contains the license + ## Must contain the key `license.lic` + existingSecret: "" + ## license.existingClaim 
Name of the existing PVC in which the license is stored + ## Must contain the file `license.lic` + existingClaim: "" +## Onlyoffice Docs logging parameters +log: + ## log.level Defines the type and severity of a logged event + ## Possible values are `ALL`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `FATAL`, `MARK`, `OFF` + level: ALL + ## log.type Defines the format of a logged event + ## Possible values are `pattern`, `json`, `basic`, `coloured`, `messagePassThrough`, `dummy` + type: pattern + ## log.pattern Defines the log pattern if `log.type=pattern` + ## ref: https://github.com/log4js-node/log4js-node/blob/master/docs/layouts.md#pattern-format + pattern: "[%d] [%p] [%X{DOCID}] [%X{USERID}] %c - %.10000m" +## wopi.enabled Defines if `WOPI` is enabled +## If the parameter is enabled, then caching attributes for the mounted directory (`PVC`) should be disabled for the client +wopi: + enabled: false +## Onlyoffice Docs metrics parameters +## StatsD is used as an intermediate metric collector +metrics: + ## metrics.enabled Specifies the enabling StatsD for ONLYOFFICE Docs + enabled: false + ## metrics.host Defines StatsD listening host + host: statsd-exporter-prometheus-statsd-exporter + ## metrics.port Defines StatsD listening port + port: "8125" + ## metrics.prefix Defines StatsD metrics prefix for backend services + prefix: ds. 
+## JSON Web Token parameters +jwt: + ## jwt.enabled Specifies the enabling the JSON Web Token validation by the ONLYOFFICE Docs + ## Common for inbox and outbox requests + enabled: true + ## jwt.secret Defines the secret key to validate the JSON Web Token in the request to the ONLYOFFICE Docs + ## Common for inbox and outbox requests + secret: "MYSECRET" + ## jwt.header Defines the http header that will be used to send the JSON Web Token + ## Common for inbox and outbox requests + header: "Authorization" + ## jwt.inBody Specifies the enabling the token validation in the request body to the ONLYOFFICE Docs + inBody: false + ## jwt.inbox JSON Web Token validation parameters for inbox requests only + ## jwt.outbox JSON Web Token validation parameters for outbox requests only + ## If not specified, the values of the parameters of the common `jwt` are used + ## Example: + ## inbox: + ## enabled: true + ## secret: "MYSECRET" + ## header: "Authorization" + inbox: {} + ## outbox: + ## enabled: true + ## secret: "MYSECRET" + ## header: "Authorization" + outbox: {} + ## jwt.existingSecret The name of an existing secret containing variables for jwt + ## If not specified, a secret named `jwt` will be created + existingSecret: "" +## Config for overriding default values +extraConf: + ## extraConf.configMap The name of the ConfigMap containing the json file that overrides the default values + configMap: "" + ## extraConf.filename The name of the json file that contains custom values + ## Must be the same as the `key` name in `extraConf.configMap` + filename: local-production-linux.json +## Additional customers interface themes +extraThemes: + ## extraThemes.configMap The name of the ConfigMap containing the json file that contains the interface themes + configMap: "" + ## extraThemes.filename The name of the json file that contains custom interface themes + ## Must be the same as the `key` name in `extraThemes.configMap` + filename: custom-themes.json +## sqlScripts.branchName
The name of the repository branch from which sql scripts will be downloaded +## ref: https://github.com/ONLYOFFICE/server/tree/master/schema +sqlScripts: + branchName: master +## Onlyoffice Docs request-filtering-agent parameters +## These parameters are used if JWT is disabled: `jwt.enabled=false` +requestFilteringAgent: + ## requestFilteringAgent.allowPrivateIPAddress Defines if it is allowed to connect private IP address or not + allowPrivateIPAddress: true + ## requestFilteringAgent.allowMetaIPAddress Defines if it is allowed to connect meta address or not + allowMetaIPAddress: true + ## requestFilteringAgent.allowIPAddressList Defines the list of IP addresses allowed to connect + ## These values are preferred over `requestFilteringAgent.denyIPAddressList` + ## Example: + ## allowIPAddressList: + ## - 10.244.0.79 + ## - 192.168.1.25 + allowIPAddressList: [] + ## requestFilteringAgent.denyIPAddressList Defines the list of IP addresses denied to connect + ## Example: + ## denyIPAddressList: + ## - 10.244.0.80 + denyIPAddressList: [] + +## Onlyoffice Docs Documentserver Deployment parameters +## This block defines the parameters common to all the Pods of this deployment +## The Pod this deployment creates consists of several containers: proxy, docservice, converter-[1..N], postgresql and rabbitmq +## +documentserver: + ## documentserver.terminationGracePeriodSeconds The time to terminate gracefully during which the Pod will have the Terminating status + terminationGracePeriodSeconds: 60 + ## documentserver.keysRedisDBNum The number of the database for storing the balancing results + keysRedisDBNum: "1" + ## documentserver.keysExpireTime The time in seconds after which the key will be deleted from the balancing database.
172800 means 48 hours + keysExpireTime: 172800 + ## documentserver.ingressCustomConfigMapsNamespace defines where custom controller configmaps will be deployed + ## Should be the same ns where controller is deployed + ingressCustomConfigMapsNamespace: default + ## documentserver.annotations Defines annotations that will be additionally added to Documentserver Deployment + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## documentserver.podAnnotations Map of annotations to add to the Documentserver deployment pods + podAnnotations: + rollme: "{{ randAlphaNum 5 | quote }}" + ## documentserver.replicas Number of Documentserver replicas to deploy + ## If the `documentserver.autoscaling.enabled` parameter is enabled, it is ignored + replicas: 3 + ## Update strategy used to replace old Pods by new ones. Allowed values: `RollingUpdate` or `Recreate` + ## It is recommended to use the `RollingUpdate` type + ## documentserver.updateStrategy.type Documentserver deployment update strategy type + updateStrategy: + type: RollingUpdate + ## documentserver.customPodAntiAffinity Prohibiting the scheduling of Documentserver Pods relative to other Pods containing the specified labels on the same node + ## Example: + ## customPodAntiAffinity: + ## requiredDuringSchedulingIgnoredDuringExecution: + ## - labelSelector: + ## matchExpressions: + ## - key: app + ## operator: In + ## values: + ## - example + ## topologyKey: kubernetes.io/hostname + customPodAntiAffinity: {} + ## Pod affinity rules for Documentserver Pods scheduling by nodes relative to other Pods + ## Pod affinity allows you to constrain which nodes Documentserver Pods can be scheduled on based on the labels of Pods already running on that node + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Example: + ## podAffinity: + ## requiredDuringSchedulingIgnoredDuringExecution: + ##
- labelSelector: + ## matchExpressions: + ## - key: app + ## operator: In + ## values: + ## - store + ## topologyKey: topology.kubernetes.io/zone + ## + podAffinity: {} + ## Node affinity rules for Documentserver Pods scheduling by nodes + ## Node affinity allows you to constrain which nodes Documentserver Pods can be scheduled on based on node labels + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Example: + ## nodeAffinity: + ## preferredDuringSchedulingIgnoredDuringExecution: + ## - weight: 100 + ## preference: + ## matchExpressions: + ## - key: kubernetes.io/name + ## operator: In + ## values: + ## - name1 + ## - name2 + ## + nodeAffinity: {} + ## documentserver.nodeSelector Node labels for Documentserver Pods assignment + ## If set to, it takes priority over the `nodeSelector` + nodeSelector: {} + ## documentserver.tolerations Tolerations for Documentserver Pods assignment + ## If set to, it takes priority over the `tolerations` + tolerations: [] + ## Horizontal Pod Autoscaling parameters + ## Horizontal Pod Autoscaling is used for autoscaling of the Documentserver deployment + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## The parameters below for autoscaling are used only when `documentserver.autoscaling.enabled=true` + autoscaling: + ## documentserver.autoscaling.enabled Enable Documentserver deployment autoscaling + enabled: false + ## documentserver.autoscaling.annotations Defines annotations that will be additionally added to Documentserver deployment HPA + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## documentserver.autoscaling.minReplicas Documentserver deployment autoscaling minimum number of replicas + minReplicas: 2 + ## documentserver.autoscaling.maxReplicas Documentserver deployment autoscaling maximum number of replicas + maxReplicas: 4 + targetCPU: + ##
documentserver.autoscaling.targetCPU.enabled Enable autoscaling of Documentserver deployment by CPU usage percentage + enabled: true + ## documentserver.autoscaling.targetCPU.utilizationPercentage Documentserver deployment autoscaling target CPU percentage + utilizationPercentage: 70 + targetMemory: + ## documentserver.autoscaling.targetMemory.enabled Enable autoscaling of Documentserver deployment by memory usage percentage + enabled: false + ## documentserver.autoscaling.targetMemory.utilizationPercentage Documentserver deployment autoscaling target memory percentage + utilizationPercentage: 70 + ## documentserver.autoscaling.customMetricsType Custom, additional or external autoscaling metrics for the documentserver deployment + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics + ## Example: + ## customMetricsType: + ## - type: Object + ## object: + ## metric: + ## name: requests-per-second + ## describedObject: + ## apiVersion: networking.k8s.io/v1 + ## kind: Ingress + ## name: main-route + ## target: + ## type: Value + ## value: 2k + customMetricsType: [] + ## documentserver.autoscaling.behavior Configuring Documentserver deployment scaling behavior policies for the `scaleDown` and `scaleUp` fields + ## If not set the default values are used: + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#default-behavior + ## Example: + ## behavior: + ## scaleDown: + ## stabilizationWindowSeconds: 300 + ## policies: + ## - type: Pods + ## value: 4 + ## periodSeconds: 60 + ## scaleUp: + ## stabilizationWindowSeconds: 0 + ## policies: + ## - type: Percent + ## value: 70 + ## periodSeconds: 15 + ## selectPolicy: Max + behavior: {} + ## Documentserver initContainers parameters + ## Containers that run before all containers in a Pod + ## ref:https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## This Init Container adds an entry to 
Redis containing the name of the Pod and its IP + initContainers: + image: + ## documentserver.initContainers.image.repository Documentserver add-shardkey initContainer image repository + repository: onlyoffice/docs-utils + ## documentserver.initContainers.image.tag Documentserver add-shardkey initContainer image tag + tag: 8.1.1-2 + ## documentserver.initContainers.image.pullPolicy Documentserver add-shardkey initContainer image pull policy + pullPolicy: IfNotPresent + ## Configure a Security Context for Documentserver add-shardkey initContainer container in Pod + containerSecurityContext: + ## documentserver.initContainers.containerSecurityContext.enabled Enable security context for Documentserver add-shardkey initContainer container + enabled: false + ## documentserver.initContainers.containerSecurityContext.runAsUser User ID for Documentserver add-shardkey initContainer container + runAsUser: 101 + ## documentserver.initContainers.containerSecurityContext.runAsGroup Group ID for Documentserver add-shardkey initContainer container + runAsGroup: 101 + ## documentserver.initContainers.containerSecurityContext.runAsNonRoot Require that the container will run with a user with UID other than 0 + runAsNonRoot: true + ## documentserver.initContainers.containerSecurityContext.allowPrivilegeEscalation Controls whether a process can gain more privileges than its parent process + allowPrivilegeEscalation: false + ## documentserver.initContainers.containerSecurityContext.seccompProfile Defines the Seccomp profile for Documentserver add-shardkey initContainer container + seccompProfile: + type: RuntimeDefault + ## documentserver.initContainers.containerSecurityContext.capabilities Defines the privileges granted to the process + capabilities: + drop: ["ALL"] + # Documentserver add-shardkey initContainer resource requests and limits + # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + # documentserver.initContainers.resources.requests The 
requested resources for the Documentserver add-shardkey initContainer + # documentserver.initContainers.resources.limits The resources limits for the Documentserver add-shardkey initContainer + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "1Gi" + cpu: "1000m" + # documentserver.initContainers.custom Custom Documentserver initContainers parameters + # Additional containers that run before all containers in a Pod + ## Example: + ## custom: + ## - name: change-volume-owner + ## image: busybox:latest + ## command: ['chown', '-R', '101:101', '/var/lib/onlyoffice/documentserver/App_Data/cache/files'] + ## volumeMounts: + ## - name: ds-files + ## mountPath: /var/lib/onlyoffice/documentserver/App_Data/cache/files + custom: [] + ## + ## Documentserver Containers parameters + ## Parameters of the Docservice container + ## + docservice: + image: + ## documentserver.docservice.image.repository docservice container image repository + ## For more information, see after the Parameters table + ## https://github.com/ONLYOFFICE/Kubernetes-Docs#4-parameters + repository: onlyoffice/docs-docservice-de + ## documentserver.docservice.image.tag docservice container image tag + tag: 8.1.1-2 + ## documentserver.docservice.image.pullPolicy docservice container image pull policy + pullPolicy: IfNotPresent + ## Configure a Security Context for the Docservice container + containerSecurityContext: + ## documentserver.docservice.containerSecurityContext.enabled Enable security context for the Docservice container + enabled: false + ## documentserver.docservice.containerSecurityContext.runAsUser User ID for the Docservice container + runAsUser: 101 + ## documentserver.docservice.containerSecurityContext.runAsGroup Group ID for the Docservice container + runAsGroup: 101 + ## documentserver.docservice.containerSecurityContext.runAsNonRoot Require that the container will run with a user with UID other than 0 + runAsNonRoot: true + ## 
documentserver.docservice.containerSecurityContext.allowPrivilegeEscalation Controls whether a process can gain more privileges than its parent process + allowPrivilegeEscalation: false + ## documentserver.docservice.containerSecurityContext.seccompProfile Defines the Seccomp profile for the Docservice container + seccompProfile: + type: RuntimeDefault + ## documentserver.docservice.containerSecurityContext.capabilities Defines the privileges granted to the process + capabilities: + drop: ["ALL"] + ## documentserver.docservice.containerPorts.http docservice container port + containerPorts: + http: 8000 + ## Probe used for the docservice container: startup, readiness and liveness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## The parameters below for startup probes are used only when `documentserver.docservice.startupProbe.enabled=true` + startupProbe: + ## documentserver.docservice.startupProbe.enabled Enable startupProbe for docservice container + enabled: true + httpGet: + ## documentserver.docservice.startupProbe.httpGet.path Checking the path for startupProbe + path: /index.html + ## documentserver.docservice.startupProbe.httpGet.port Checking the port for startupProbe + port: 8000 + ## documentserver.docservice.startupProbe.failureThreshold Failure threshold for startupProbe + failureThreshold: 30 + ## documentserver.docservice.startupProbe.periodSeconds Period seconds for startupProbe + periodSeconds: 10 + ## The parameters below for readiness probes are used only when `documentserver.docservice.readinessProbe.enabled=true` + readinessProbe: + ## documentserver.docservice.readinessProbe.enabled Enable readinessProbe for docservice container + enabled: true + ## documentserver.docservice.readinessProbe.failureThreshold Failure threshold for readinessProbe + failureThreshold: 2 + httpGet: + ## documentserver.docservice.readinessProbe.httpGet.path Checking the path for readinessProbe + 
path: /index.html + ## documentserver.docservice.readinessProbe.httpGet.port Checking the port for readinessProbe + port: 8000 + ## documentserver.docservice.readinessProbe.periodSeconds Period seconds for readinessProbe + periodSeconds: 10 + ## documentserver.docservice.readinessProbe.successThreshold Success threshold for readinessProbe + successThreshold: 1 + ## documentserver.docservice.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + timeoutSeconds: 3 + ## The parameters below for liveness probes are used only when `documentserver.docservice.livenessProbe.enabled=true` + livenessProbe: + ## documentserver.docservice.livenessProbe.enabled Enable livenessProbe for docservice container + enabled: true + ## documentserver.docservice.livenessProbe.failureThreshold Failure threshold for livenessProbe + failureThreshold: 3 + httpGet: + ## documentserver.docservice.livenessProbe.httpGet.path Checking the path for livenessProbe + path: /index.html + ## documentserver.docservice.livenessProbe.httpGet.port Checking the port for livenessProbe + port: 8000 + ## documentserver.docservice.livenessProbe.periodSeconds Period seconds for livenessProbe + periodSeconds: 10 + ## documentserver.docservice.livenessProbe.successThreshold Success threshold for livenessProbe + successThreshold: 1 + ## documentserver.docservice.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + timeoutSeconds: 3 + ## docservice container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## documentserver.docservice.resources.requests The requested resources for the docservice container + ## documentserver.docservice.resources.limits The resources limits for the docservice container + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "4Gi" + cpu: "4000m" + ## Parameters of the Proxy container + ## + proxy: + ## documentserver.proxy.accessLog Defines the nginx config access_log 
format directive + ## ref: https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log + ## Example: + ## accessLog: "main" + accessLog: "main" + ## documentserver.proxy.gzipProxied Defines the nginx config gzip_proxied directive + ## ref: https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_proxied + gzipProxied: "off" + ## documentserver.proxy.clientMaxBodySize Defines the nginx config client_max_body_size directive + ## ref: https://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size + clientMaxBodySize: "100m" + ## documentserver.proxy.workerConnections Defines the nginx config worker_connections directive + ## ref: https://nginx.org/en/docs/ngx_core_module.html#worker_connections + workerConnections: "4096" + ## documentserver.proxy.workerProcesses Defines the nginx config worker_processes directive + ## ref: https://nginx.org/en/docs/ngx_core_module.html#worker_processes + workerProcesses: "1" + ## documentserver.proxy.secureLinkSecret Defines secret for the nginx config directive secure_link_md5 + ## ref: https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link_md5 + secureLinkSecret: verysecretstring + ## documentserver.proxy.infoAllowedIP Defines ip addresses for accessing the info page + ## Example: + infoAllowedIP: + - 10.244.0.0/16 + - 10.135.0.0/16 + ## infoAllowedIP: [] + ## documentserver.proxy.infoAllowedUser Defines user name for accessing the info page + ## If not set to, Nginx Basic Authentication will not be applied to access the info page + ## ref: https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html + ## For more details, see here: + ## ref: https://github.com/ONLYOFFICE/Kubernetes-Docs#12-access-to-the-info-page-optional + infoAllowedUser: "" + ## documentserver.proxy.infoAllowedSecretKeyName The name of the key that contains the info auth user password + ## Used if `documentserver.proxy.infoAllowedUser` is set + infoAllowedSecretKeyName: info-auth-password + ## 
documentserver.proxy.infoAllowedExistingSecret Name of existing secret to use for info auth password + ## Used if `documentserver.proxy.infoAllowedUser` is set + ## Must contain the key specified in `documentserver.proxy.infoAllowedSecretKeyName` + ## If set to, it takes priority over the `documentserver.proxy.infoAllowedPassword` + infoAllowedExistingSecret: "" + ## documentserver.proxy.infoAllowedPassword Defines user password for accessing the info page + ## Used if `documentserver.proxy.infoAllowedUser` is set + infoAllowedPassword: "password" + ## documentserver.proxy.welcomePage.enabled Defines whether the welcome page will be displayed + welcomePage: + enabled: true + image: + ## documentserver.proxy.image.repository proxy container image repository + ## For more information, see after the Parameters table + ## https://github.com/ONLYOFFICE/Kubernetes-Docs#4-parameters + repository: onlyoffice/docs-proxy-de + ## documentserver.proxy.image.tag proxy container image tag + tag: 8.1.1-2 + ## documentserver.proxy.image.pullPolicy proxy container image pull policy + pullPolicy: IfNotPresent + ## Configure a Security Context for the Proxy container + containerSecurityContext: + ## documentserver.proxy.containerSecurityContext.enabled Enable security context for the Proxy container + enabled: false + ## documentserver.proxy.containerSecurityContext.runAsUser User ID for the Proxy container + runAsUser: 101 + ## documentserver.proxy.containerSecurityContext.runAsGroup Group ID for the Proxy container + runAsGroup: 101 + ## documentserver.proxy.containerSecurityContext.runAsNonRoot Require that the container will run with a user with UID other than 0 + runAsNonRoot: true + ## documentserver.proxy.containerSecurityContext.allowPrivilegeEscalation Controls whether a process can gain more privileges than its parent process + allowPrivilegeEscalation: false + ## documentserver.proxy.containerSecurityContext.seccompProfile Defines the Seccomp profile for the Proxy 
container + seccompProfile: + type: RuntimeDefault + ## documentserver.proxy.containerSecurityContext.capabilities Defines the privileges granted to the process + capabilities: + drop: ["ALL"] + ## documentserver.proxy.containerPorts.http proxy container port + containerPorts: + http: 8888 + ## Probe used for the proxy container: startup, readiness and liveness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## The parameters below for startup probes are used only when `documentserver.proxy.startupProbe.enabled=true` + startupProbe: + ## documentserver.proxy.startupProbe.enabled Enable startupProbe for proxy container + enabled: true + httpGet: + ## documentserver.proxy.startupProbe.httpGet.path Checking the path for startupProbe + path: /index.html + ## documentserver.proxy.startupProbe.httpGet.port Checking the port for startupProbe + port: 8888 + ## documentserver.proxy.startupProbe.failureThreshold Failure threshold for startupProbe + failureThreshold: 30 + ## documentserver.proxy.startupProbe.periodSeconds Period seconds for startupProbe + periodSeconds: 10 + ## The parameters below for readiness probes are used only when `documentserver.proxy.readinessProbe.enabled=true` + readinessProbe: + ## documentserver.proxy.readinessProbe.enabled Enable readinessProbe for proxy container + enabled: true + ## documentserver.proxy.readinessProbe.failureThreshold Failure threshold for readinessProbe + failureThreshold: 2 + httpGet: + ## documentserver.proxy.readinessProbe.httpGet.path Checking the path for readinessProbe + path: /index.html + ## documentserver.proxy.readinessProbe.httpGet.port Checking the port for readinessProbe + port: 8888 + ## documentserver.proxy.readinessProbe.periodSeconds Period seconds for readinessProbe + periodSeconds: 10 + ## documentserver.proxy.readinessProbe.successThreshold Success threshold for readinessProbe + successThreshold: 1 + ## 
documentserver.proxy.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + timeoutSeconds: 3 + ## The parameters below for liveness probes are used only when `documentserver.proxy.livenessProbe.enabled=true` + livenessProbe: + ## documentserver.proxy.livenessProbe.enabled Enable livenessProbe for proxy container + enabled: true + ## documentserver.proxy.livenessProbe.failureThreshold Failure threshold for livenessProbe + failureThreshold: 3 + httpGet: + ## documentserver.proxy.livenessProbe.httpGet.path Checking the path for livenessProbe + path: /index.html + ## documentserver.proxy.livenessProbe.httpGet.port Checking the port for livenessProbe + port: 8888 + ## documentserver.proxy.livenessProbe.periodSeconds Period seconds for livenessProbe + periodSeconds: 10 + ## documentserver.proxy.livenessProbe.successThreshold Success threshold for livenessProbe + successThreshold: 1 + ## documentserver.proxy.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + timeoutSeconds: 3 + ## proxy container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## documentserver.proxy.resources.requests The requested resources for the proxy container + ## documentserver.proxy.resources.limits The resources limits for the proxy container + resources: + requests: + memory: "256Mi" + cpu: "100m" + limits: + memory: "4Gi" + cpu: "4000m" + ## Parameters of the Converter container + ## + converter: + ## documentserver.converter.count The number of Converter containers in the Documentserver Pod + count: 3 + image: + ## documentserver.converter.image.repository converter container image repository + ## For more information, see after the Parameters table + ## https://github.com/ONLYOFFICE/Kubernetes-Docs#4-parameters + repository: onlyoffice/docs-converter-de + ## documentserver.converter.image.tag converter container image tag + tag: 8.1.1-2 + ## documentserver.converter.image.pullPolicy converter
container image pull policy + pullPolicy: IfNotPresent + ## Configure a Security Context for the Converter container + containerSecurityContext: + ## documentserver.converter.containerSecurityContext.enabled Enable security context for the Converter container + enabled: false + ## documentserver.converter.containerSecurityContext.runAsUser User ID for the Converter container + runAsUser: 101 + ## documentserver.converter.containerSecurityContext.runAsGroup Group ID for the Converter container + runAsGroup: 101 + ## documentserver.converter.containerSecurityContext.runAsNonRoot Require that the container will run with a user with UID other than 0 + runAsNonRoot: true + ## documentserver.converter.containerSecurityContext.allowPrivilegeEscalation Controls whether a process can gain more privileges than its parent process + allowPrivilegeEscalation: false + ## documentserver.converter.containerSecurityContext.seccompProfile Defines the Seccomp profile for the Converter container + seccompProfile: + type: RuntimeDefault + ## documentserver.converter.containerSecurityContext.capabilities Defines the privileges granted to the process + capabilities: + drop: ["ALL"] + ## converter container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## documentserver.converter.resources.requests The requested resources for the converter container + ## documentserver.converter.resources.limits The resources limits for the converter container + resources: + requests: + memory: "256Mi" + cpu: "200m" + limits: + memory: "4Gi" + cpu: "4000m" + ## Parameters of the Postgresql container + ## + postgresql: + image: + ## documentserver.postgresql.image.repository Postgresql container image repository + ## For more information, see after the Parameters table + ## https://github.com/ONLYOFFICE/Kubernetes-Docs#4-parameters + repository: postgres + ## documentserver.postgresql.image.tag Postgresql container image tag + tag: 
16 + ## documentserver.postgresql.image.pullPolicy Postgresql container image pull policy + pullPolicy: IfNotPresent + ## Configure a Security Context for the Postgresql container + containerSecurityContext: + ## documentserver.postgresql.containerSecurityContext.enabled Enable security context for the Postgresql container + enabled: false + ## documentserver.postgresql.containerSecurityContext.runAsUser User ID for the Postgresql container + runAsUser: 0 + ## documentserver.postgresql.containerSecurityContext.runAsGroup Group ID for the Postgresql container + runAsGroup: 0 + ## documentserver.postgresql.containerSecurityContext.allowPrivilegeEscalation Controls whether a process can gain more privileges than its parent process + allowPrivilegeEscalation: false + ## documentserver.postgresql.containerSecurityContext.seccompProfile Defines the Seccomp profile for the Postgresql container + seccompProfile: + type: RuntimeDefault + ## documentserver.postgresql.containerSecurityContext.capabilities Defines the privileges granted to the process + capabilities: + drop: ["ALL"] + ## documentserver.postgresql.containerPorts.tcp Postgresql container port + containerPorts: + tcp: 5432 + ## Postgresql container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## documentserver.postgresql.resources.requests The requested resources for the Postgresql container + ## documentserver.postgresql.resources.limits The resources limits for the Postgresql container + resources: + requests: + memory: "256Mi" + cpu: "200m" + limits: + memory: "4Gi" + cpu: "4000m" + ## Parameters of the Rabbitmq container + ## + rabbitmq: + image: + ## documentserver.rabbitmq.image.repository Rabbitmq container image repository + ## For more information, see after the Parameters table + ## https://github.com/ONLYOFFICE/Kubernetes-Docs#4-parameters + repository: rabbitmq + ## documentserver.rabbitmq.image.tag Rabbitmq container image tag 
+ tag: 3.12.10 + ## documentserver.rabbitmq.image.pullPolicy Rabbitmq container image pull policy + pullPolicy: IfNotPresent + ## Configure a Security Context for the Rabbitmq container + containerSecurityContext: + ## documentserver.rabbitmq.containerSecurityContext.enabled Enable security context for the Rabbitmq container + enabled: false + ## documentserver.rabbitmq.containerSecurityContext.runAsUser User ID for the Rabbitmq container + runAsUser: 0 + ## documentserver.rabbitmq.containerSecurityContext.runAsGroup Group ID for the Rabbitmq container + runAsGroup: 0 + ## documentserver.rabbitmq.containerSecurityContext.allowPrivilegeEscalation Controls whether a process can gain more privileges than its parent process + allowPrivilegeEscalation: false + ## documentserver.rabbitmq.containerSecurityContext.seccompProfile Defines the Seccomp profile for the Rabbitmq container + seccompProfile: + type: RuntimeDefault + ## documentserver.rabbitmq.containerSecurityContext.capabilities Defines the privileges granted to the process + capabilities: + drop: ["ALL"] + ## documentserver.rabbitmq.containerPorts.amqp Rabbitmq container port + containerPorts: + amqp: 5672 + ## Rabbitmq container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## documentserver.rabbitmq.resources.requests The requested resources for the Rabbitmq container + ## documentserver.rabbitmq.resources.limits The resources limits for the Rabbitmq container + resources: + requests: + memory: "256Mi" + cpu: "200m" + limits: + memory: "4Gi" + cpu: "4000m" + +## Onlyoffice Docs Example StatefulSet parameters +## +example: + ## example.enabled Enables Example installation + enabled: false + ## example.dsUrl Onlyoffice Docs external address + ## It should be changed only if it is necessary to check the operation of the conversion in Example + ## E.g. 
+ ## dsUrl: "http:///" + dsUrl: "/" + ## example.annotations Defines annotations that will be additionally added to Example StatefulSet + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## example.podAnnotations Map of annotations to add to the Example pod + podAnnotations: + rollme: "{{ randAlphaNum 5 | quote }}" + ## Update strategy used to replace old Pods by new ones. Allowed values: `RollingUpdate` or `OnDelete` + ## example.updateStrategy.type Example StatefulSet update strategy type + updateStrategy: + type: RollingUpdate + ## example.customPodAntiAffinity Prohibiting the scheduling of Example Pod relative to other Pods containing the specified labels on the same node + ## Example: + ## customPodAntiAffinity: + ## requiredDuringSchedulingIgnoredDuringExecution: + ## - labelSelector: + ## matchExpressions: + ## - key: app + ## operator: In + ## values: + ## - docservice + ## topologyKey: kubernetes.io/hostname + customPodAntiAffinity: {} + ## Pod affinity rules for Example Pod scheduling by nodes relative to other Pods + ## Pod affinity allow you to constrain which nodes Example Pod can be scheduled on based on the labels of Pods already running on that node + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## Example: + ## podAffinity: + ## requiredDuringSchedulingIgnoredDuringExecution: + ## - labelSelector: + ## matchExpressions: + ## - key: app + ## operator: In + ## values: + ## - store + ## topologyKey: topology.kubernetes.io/zone + ## preferredDuringSchedulingIgnoredDuringExecution: + ## - weight: 100 + ## podAffinityTerm: + ## labelSelector: + ## matchExpressions: + ## - key: app + ## operator: NotIn + ## values: + ## - database + ## topologyKey: kubernetes.io/hostname + podAffinity: {} + ## Node affinity rules for Example Pod scheduling by nodes + ## Node affinity allow you to constrain which 
nodes Example Pod can be scheduled on based on node labels + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## Example: + ## nodeAffinity: + ## preferredDuringSchedulingIgnoredDuringExecution: + ## - weight: 100 + ## preference: + ## matchExpressions: + ## - key: kubernetes.io/name + ## operator: In + ## values: + ## - name1 + ## - name2 + ## requiredDuringSchedulingIgnoredDuringExecution: + ## nodeSelectorTerms: + ## - matchExpressions: + ## - key: topology.kubernetes.io/zone + ## operator: NotIn + ## values: + ## - zone1 + ## - zone2 + nodeAffinity: {} + ## example.nodeSelector Node labels for Example Pods assignment + ## If set to, it takes priority over the `nodeSelector` + nodeSelector: {} + ## example.tolerations Tolerations for Example Pods assignment + ## If set to, it takes priority over the `tolerations` + tolerations: [] + ## Example container image parameters + image: + ## example.image.repository example container image name + repository: onlyoffice/docs-example + ## example.image.tag example container image tag + tag: 8.1.1-2 + ## example.image.pullPolicy example container image pull policy + pullPolicy: IfNotPresent + ## Configure a Security Context for the Example container + containerSecurityContext: + ## example.containerSecurityContext.enabled Enable security context for the Example container + enabled: false + ## example.containerSecurityContext.runAsUser User ID for the Example container + runAsUser: 1001 + ## example.containerSecurityContext.runAsGroup Group ID for the Example container + runAsGroup: 1001 + ## example.containerSecurityContext.runAsNonRoot Require that the container will run with a user with UID other than 0 + runAsNonRoot: true + ## example.containerSecurityContext.allowPrivilegeEscalation Controls whether a process can gain more privileges than its parent process + allowPrivilegeEscalation: false + ## example.containerSecurityContext.seccompProfile Defines the Seccomp profile 
for the Example container + seccompProfile: + type: RuntimeDefault + ## example.containerSecurityContext.capabilities Defines the privileges granted to the process + capabilities: + drop: ["ALL"] + ## example.lifecycleHooks Defines the Example container lifecycle hooks + ## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/ + ## It is used to trigger events to run at certain points in a container's lifecycle + ## There are two hooks that are exposed: `PostStart` and `PreStop` + ## Example: + ## lifecycleHooks: + ## preStop: + ## exec: + ## command: ["/bin/sh", "-c", "sleep 25"] + lifecycleHooks: {} + ## example.containerPorts.http example container port + containerPorts: + http: 3000 + ## example container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## example.resources.requests The requested resources for the example container + ## example.resources.limits The resources limits for the example container + resources: + ## Example: + ## requests: + ## memory: "128Mi" + ## cpu: "100m" + requests: {} + ## limits: + ## memory: "128Mi" + ## cpu: "250m" + limits: {} + ## Example config for overriding default values + extraConf: + ## example.extraConf.configMap The name of the ConfigMap containing the json file that override the default values + configMap: "" + ## example.extraConf.filename The name of the json file that contains custom values + ## Must be the same as the `key` name in `example.extraConf.ConfigMap` + filename: local.json + +## Onlyoffice Docs ingress parameters +## +ingress: + ## ingress.enabled Enable the creation of an ingress for the ONLYOFFICE Docs + enabled: true + ## ingress.annotations Map of annotations to add to the Ingress + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: + nginx.ingress.kubernetes.io/proxy-body-size: 100m + # 
nginx.ingress.kubernetes.io/upstream-hash-by: "$arg_WOPISrc" + # ingress.ingressClassName Used to reference the IngressClass that should be used to implement this Ingress + # ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#the-ingress-resource/ + ingressClassName: nginx + ssl: + ## ingress.ssl.enabled Enable TLS for the ONLYOFFICE Docs ingress + enabled: false + ## ingress.ssl.secret Secret name for TLS to mount into the Ingress + ## Used only when `ingress.ssl.enabled=true` + secret: tls + ## ingress.host Ingress hostname for the ONLYOFFICE Docs + host: "" + ## ingress.path Specifies the path where ONLYOFFICE Docs will be available + ## Example: + ## path: "/ds" + path: "/" + +## Grafana parameters +## See more details about installing Grafana here: +## ref: https://github.com/ONLYOFFICE/Kubernetes-Docs#using-grafana-to-visualize-metrics-optional +grafana: + ## grafana.enabled Enable the installation of resources required for the visualization of metrics in Grafana + enabled: false + ## grafana.namespace The name of the namespace in which RBAC components and Grafana resources will be deployed + ## If not set, the name will be taken from `namespaceOverride` or .Release.Namespace + namespace: "" + ## grafana.ingress.enabled Enable the creation of an ingress for the Grafana + ## Used if you set `grafana.enabled` to `true` and want to use Nginx Ingress to access Grafana + ingress: + enabled: false + ## grafana.ingress.annotations Map of annotations to add to Grafana Ingress + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: + nginx.ingress.kubernetes.io/proxy-body-size: 100m + ## grafana.dashboard.enabled Enable the installation of ready-made Grafana dashboards + ## Used if you set `grafana.enabled` to `true` + dashboard: + enabled: false + +## Onlyoffice Docs jobs parameters +## +## Job by Grafana Dashboard has post-install and post-upgrade hooks and executes after 
any resources are created in Kubernetes +## ref: https://helm.sh/docs/topics/charts_hooks/#the-available-hooks +## It creates Config Maps containing Grafana dashboards +## It is executed if `grafana.enabled` and `grafana.dashboard.enabled` are set to `true` +grafanaDashboard: + job: + ## grafanaDashboard.job.annotations Defines annotations that will be additionally added to Grafana Dashboard Job + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## grafanaDashboard.job.podAnnotations Map of annotations to add to the Grafana Dashboard Pod + podAnnotations: {} + ## grafanaDashboard.job.customPodAntiAffinity Prohibiting the scheduling of Grafana Dashboard Job Pod relative to other Pods containing the specified labels on the same node + customPodAntiAffinity: {} + ## Pod affinity rules for Grafana Dashboard Job Pod scheduling by nodes relative to other Pods + ## Pod affinity allow you to constrain which nodes Grafana Dashboard Job Pod can be scheduled on based on the labels of Pods already running on that node + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + podAffinity: {} + ## Node affinity rules for Grafana Dashboard Job Pod scheduling by nodes + ## Node affinity allow you to constrain which nodes Grafana Dashboard Job Pod can be scheduled on based on node labels + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + nodeAffinity: {} + ## grafanaDashboard.job.nodeSelector Node labels for Grafana Dashboard Job Pod assignment + ## If set to, it takes priority over the `nodeSelector` + nodeSelector: {} + ## grafanaDashboard.job.tolerations Tolerations for Grafana Dashboard Job Pod assignment + ## If set to, it takes priority over the `tolerations` + tolerations: [] + image: + ## grafanaDashboard.job.image.repository Job by Grafana Dashboard ONLYOFFICE Docs image 
repository + repository: onlyoffice/docs-utils + ## grafanaDashboard.job.image.tag Job by Grafana Dashboard ONLYOFFICE Docs image tag + tag: 8.1.1-2 + ## grafanaDashboard.job.image.pullPolicy Job by Grafana Dashboard ONLYOFFICE Docs image pull policy + pullPolicy: IfNotPresent + ## Configure a Security Context for the Grafana Dashboard container + containerSecurityContext: + ## grafanaDashboard.job.containerSecurityContext.enabled Enable security context for the Grafana Dashboard container + enabled: false + ## grafanaDashboard.job.containerSecurityContext.runAsUser User ID for the Grafana Dashboard container + runAsUser: 101 + ## grafanaDashboard.job.containerSecurityContext.runAsGroup Group ID for the Grafana Dashboard container + runAsGroup: 101 + ## grafanaDashboard.job.containerSecurityContext.runAsNonRoot Require that the container will run with a user with UID other than 0 + runAsNonRoot: true + ## grafanaDashboard.job.containerSecurityContext.allowPrivilegeEscalation Controls whether a process can gain more privileges than its parent process + allowPrivilegeEscalation: false + ## grafanaDashboard.job.containerSecurityContext.seccompProfile Defines the Seccomp profile for the Grafana Dashboard container + seccompProfile: + type: RuntimeDefault + ## grafanaDashboard.job.containerSecurityContext.capabilities Defines the privileges granted to the process + capabilities: + drop: ["ALL"] + ## Job Grafana Dashboard container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## grafanaDashboard.job.resources.requests The requested resources for the job Grafana Dashboard container + ## grafanaDashboard.job.resources.limits The resources limits for the job Grafana Dashboard container + resources: + ## Example: + ## requests: + ## memory: "256Mi" + ## cpu: "200m" + requests: {} + ## limits: + ## memory: "1Gi" + ## cpu: "1000m" + limits: {} + +## Onlyoffice Docs tests parameters +tests: + ## 
tests.enabled Enable the resources creation necessary for ONLYOFFICE Docs launch testing and connected dependencies availability testing + ## These resources will be used when running the `helm test` command + enabled: true + ## tests.annotations Defines annotations that will be additionally added to Test Pod + ## If set to, it takes priority over the `commonAnnotations` + ## You can also use `tpl` as the value for the key + annotations: {} + ## tests.customPodAntiAffinity Prohibiting the scheduling of Test Pod relative to other Pods containing the specified labels on the same node + customPodAntiAffinity: {} + ## Pod affinity rules for Test Pod scheduling by nodes relative to other Pods + ## Pod affinity allow you to constrain which nodes Test Pod can be scheduled on based on the labels of Pods already running on that node + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + podAffinity: {} + ## Node affinity rules for Test Pod scheduling by nodes + ## Node affinity allow you to constrain which nodes Test Pod can be scheduled on based on node labels + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + nodeAffinity: {} + ## tests.nodeSelector Node labels for Test Pod assignment + ## If set to, it takes priority over the `nodeSelector` + nodeSelector: {} + ## tests.tolerations Tolerations for Test Pod assignment + ## If set to, it takes priority over the `tolerations` + tolerations: [] + ## Test container image parameters + image: + ## tests.image.repository test container image name + repository: onlyoffice/docs-utils + ## tests.image.tag test container image tag + tag: 8.1.1-2 + ## tests.image.pullPolicy test container image pull policy + pullPolicy: IfNotPresent + ## Configure a Security Context for the Test container + containerSecurityContext: + ## tests.containerSecurityContext.enabled Enable security context for the Test container + enabled: 
false + ## tests.containerSecurityContext.runAsUser User ID for the Test container + runAsUser: 101 + ## tests.containerSecurityContext.runAsGroup Group ID for the Test container + runAsGroup: 101 + ## tests.containerSecurityContext.runAsNonRoot Require that the container will run with a user with UID other than 0 + runAsNonRoot: true + ## tests.containerSecurityContext.allowPrivilegeEscalation Controls whether a process can gain more privileges than its parent process + allowPrivilegeEscalation: false + ## tests.containerSecurityContext.seccompProfile Defines the Seccomp profile for the Test container + seccompProfile: + type: RuntimeDefault + ## tests.containerSecurityContext.capabilities Defines the privileges granted to the process + capabilities: + drop: ["ALL"] + ## test container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + ## tests.resources.requests The requested resources for the test container + ## tests.resources.limits The resources limits for the test container + resources: + ## Example: + ## requests: + ## memory: "256Mi" + ## cpu: "200m" + requests: {} + ## limits: + ## memory: "512Mi" + ## cpu: "1000m" + limits: {} From 3f40c95249e9c104cd704156b48c2c980a40ad93 Mon Sep 17 00:00:00 2001 From: kireevdmitry Date: Mon, 5 Aug 2024 12:43:29 +0000 Subject: [PATCH 02/26] Add an action to upload charts to the repository --- .github/workflows/4testing_repo.yaml | 51 ++++++++++++++++++++++++++++ .github/workflows/stable_repo.yaml | 40 ++++++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 .github/workflows/4testing_repo.yaml create mode 100644 .github/workflows/stable_repo.yaml diff --git a/.github/workflows/4testing_repo.yaml b/.github/workflows/4testing_repo.yaml new file mode 100644 index 0000000..24120eb --- /dev/null +++ b/.github/workflows/4testing_repo.yaml @@ -0,0 +1,51 @@ +--- +name: Build 4testing repo + +on: + workflow_dispatch: + +jobs: + build: + name: 
Chart release + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set chart version + run: | + wget https://download.onlyoffice.com/charts/4testing/index.yaml -P /tmp + LATEST_VERSION=$(awk '/docs-shards:/{f=1};f{print}' /tmp/index.yaml | awk '/version:/ {print $2;}' | head -1) + NEW_VERSION=$(awk '/version:/ {print $2;}' Chart.yaml) + if [[ "$LATEST_VERSION" == *"$NEW_VERSION"* ]]; then + RC=${LATEST_VERSION: -1} + let "RC+=1" + else + RC='1' + fi + NEW_VERSION=$(echo $NEW_VERSION)-rc$RC + sed 's/\(version:\).*/\1 '$NEW_VERSION'/' -i Chart.yaml + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET }} + aws-region: us-east-1 + - name: Helm release + uses: shellbear/helm-release-action@v0.1 + with: + repo: ${{ secrets.AWS_BUCKET_URL }}/charts/4testing + chart: ./ + + - name: Reindex index.yaml + run: | + aws s3 cp ${{ secrets.AWS_BUCKET_URL }}/charts/4testing/index.yaml . 
+ ls | grep index.yaml + sed -i "s|${{ secrets.AWS_BUCKET_URL }}|${{ secrets.AWS_CLOUDFRONT_URL }}|g" index.yaml + aws s3 cp index.yaml ${{ secrets.AWS_BUCKET_URL }}/charts/4testing/ --acl public-read + - name: Make public access to chart repo + run: | + mkdir s3dir + aws s3 cp ${{ secrets.AWS_BUCKET_URL }}/charts/4testing/ ./s3dir --recursive + aws s3 cp ./s3dir ${{ secrets.AWS_BUCKET_URL }}/charts/4testing --acl public-read --recursive + - name: Invalidate CLOUDFRONT cache + run: aws cloudfront create-invalidation --distribution-id ${{ secrets.AWS_DISTRIBUTION_ID }} --paths "/charts/4testing/*" diff --git a/.github/workflows/stable_repo.yaml b/.github/workflows/stable_repo.yaml new file mode 100644 index 0000000..3e9494a --- /dev/null +++ b/.github/workflows/stable_repo.yaml @@ -0,0 +1,40 @@ +--- +name: Release Chart on S3 repo + +on: + push: + branches: + - master + +jobs: + build: + name: Chart release + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v2 + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v1 + with: + aws-access-key-id: ${{ secrets.AWS_ID }} + aws-secret-access-key: ${{ secrets.AWS_SECRET }} + aws-region: us-east-1 + - name: Helm release + uses: shellbear/helm-release-action@v0.1 + with: + repo: ${{ secrets.AWS_BUCKET_URL }}/charts/stable + chart: ./ + + - name: Reindex index.yaml + run: | + aws s3 cp ${{ secrets.AWS_BUCKET_URL }}/charts/stable/index.yaml . 
+ ls | grep index.yaml + sed -i "s|${{ secrets.AWS_BUCKET_URL }}|${{ secrets.AWS_CLOUDFRONT_URL }}|g" index.yaml + aws s3 cp index.yaml ${{ secrets.AWS_BUCKET_URL }}/charts/stable/ --acl public-read + - name: Make public access to chart repo + run: | + mkdir s3dir + aws s3 cp ${{ secrets.AWS_BUCKET_URL }}/charts/stable/ ./s3dir --recursive + aws s3 cp ./s3dir ${{ secrets.AWS_BUCKET_URL }}/charts/stable --acl public-read --recursive + - name: Invalidate CLOUDFRONT cache + run: aws cloudfront create-invalidation --distribution-id ${{ secrets.AWS_DISTRIBUTION_ID }} --paths "/charts/stable/*" From 96e0736c68bfab83dddee339bf07b626ffedecfa Mon Sep 17 00:00:00 2001 From: danilapog Date: Mon, 5 Aug 2024 16:00:41 +0300 Subject: [PATCH 03/26] Remove all mentions about Redis dependency chart --- Chart.yaml | 5 - README.md | 138 ++++++++------------- templates/_helpers.tpl | 15 +-- templates/configmaps/balancer-snippet.yaml | 2 +- values.yaml | 16 --- 5 files changed, 55 insertions(+), 121 deletions(-) diff --git a/Chart.yaml b/Chart.yaml index bf69783..6e67469 100644 --- a/Chart.yaml +++ b/Chart.yaml @@ -13,8 +13,3 @@ dependencies: version: 4.9.0 repository: https://kubernetes.github.io/ingress-nginx condition: ingress-nginx.enabled - -- name: redis - version: 19.5.3 - repository: https://charts.bitnami.com/bitnami - condition: redis.enabled diff --git a/README.md b/README.md index d6b210e..cb85379 100644 --- a/README.md +++ b/README.md @@ -6,21 +6,21 @@ ONLYOFFICE Docs for Kubernetes - [Deploy prerequisites](#deploy-prerequisites) * [1. Add Helm repositories](#1-add-helm-repositories) * [2. Install Persistent Storage](#2-install-persistent-storage) - * [3. Configure dependent charts](#3-configure-dependent-charts) - + [3.1 Configure redis/bitnami subchart](#31-configure-redisbitnami-subchart) - + [3.2 Configure ingress-nginx/kubernetes subchart](#32-configure-ingress-nginxkubernetes-subchart) - * [4. 
Deploy StatsD exporter](#4-deploy-statsd-exporter) - + [4.1 Add Helm repositories](#41-add-helm-repositories) - + [4.2 Installing Prometheus](#42-installing-prometheus) - + [4.3 Installing StatsD exporter](#43-installing-statsd-exporter) - * [5. Make changes to Node-config configuration files](#5-make-changes-to-Node-config-configuration-files) - + [5.1 Create a ConfigMap containing a json file](#51-create-a-configmap-containing-a-json-file) - + [5.2 Specify parameters when installing ONLYOFFICE Docs](#52-specify-parameters-when-installing-onlyoffice-docs) - * [6. Add custom Fonts](#6-add-custom-fonts) - * [7. Add Plugins](#7-add-plugins) - * [8. Change interface themes](#8-change-interface-themes) - + [8.1 Create a ConfigMap containing a json file](#81-create-a-configmap-containing-a-json-file) - + [8.2 Specify parameters when installing ONLYOFFICE Docs](#82-specify-parameters-when-installing-onlyoffice-docs) + * [3. Deploy Redis](#3-deploy-redis) + * [4. Configure dependent charts](#4-configure-dependent-charts) + + [4.1 Configure ingress-nginx/kubernetes subchart](#41-configure-ingress-nginxkubernetes-subchart) + * [5. Deploy StatsD exporter](#5-deploy-statsd-exporter) + + [5.1 Add Helm repositories](#51-add-helm-repositories) + + [5.2 Installing Prometheus](#52-installing-prometheus) + + [5.3 Installing StatsD exporter](#53-installing-statsd-exporter) + * [6. Make changes to Node-config configuration files](#6-make-changes-to-Node-config-configuration-files) + + [6.1 Create a ConfigMap containing a json file](#61-create-a-configmap-containing-a-json-file) + + [6.2 Specify parameters when installing ONLYOFFICE Docs](#62-specify-parameters-when-installing-onlyoffice-docs) + * [7. Add custom Fonts](#7-add-custom-fonts) + * [8. Add Plugins](#8-add-plugins) + * [9. 
Change interface themes](#9-change-interface-themes) + + [9.1 Create a ConfigMap containing a json file](#91-create-a-configmap-containing-a-json-file) + + [9.2 Specify parameters when installing ONLYOFFICE Docs](#92-specify-parameters-when-installing-onlyoffice-docs) - [Deploy ONLYOFFICE Docs](#deploy-onlyoffice-docs) * [1. Deploy the ONLYOFFICE Docs license](#1-deploy-the-onlyoffice-docs-license) + [1.1 Create secret](#11-create-secret) @@ -39,10 +39,7 @@ ONLYOFFICE Docs for Kubernetes * [8. ONLYOFFICE Docs installation test (optional)](#8-onlyoffice-docs-installation-test-optional) * [9. Access to the info page (optional)](#9-access-to-the-info-page-optional) * [10. Deploy ONLYOFFICE Docs with your own dependency (optional)](#10-deploy-onlyoffice-docs-with-your-own-dependency-optional) - * [10.1 Use your own Redis](#101-use-your-own-redis) - + [10.1.1 Connect ONLYOFFICE Docs to Redis using password](#1011-connect-to-redis-using-password) - + [10.1.2 Connect ONLYOFFICE Docs to Redis using existing secret](#1012-alternative-connect-to-redis-using-existing-secret) - * [10.2 Use your own nginx-ingress controller](#102-use-your-own-nginx-ingress-controller) + * [10.1 Use your own nginx-ingress controller](#101-use-your-own-nginx-ingress-controller) - [Using Grafana to visualize metrics (optional)](#using-grafana-to-visualize-metrics-optional) * [1. Deploy Grafana](#1-deploy-grafana) + [1.1 Deploy Grafana without installing ready-made dashboards](#11-deploy-grafana-without-installing-ready-made-dashboards) @@ -61,6 +58,7 @@ ONLYOFFICE Docs for Kubernetes ### 1. 
Add Helm repositories ```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami $ helm repo add nfs-server-provisioner https://kubernetes-sigs.github.io/nfs-ganesha-server-and-external-provisioner $ helm repo add onlyoffice https://download.onlyoffice.com/charts/stable $ helm repo update @@ -102,37 +100,31 @@ Configure a Persistent Volume Claim Note: If you want to enable `WOPI`, please set the parameter `wopi.enabled=true`. In this case Persistent Storage must be connected to the cluster nodes with the disabled caching attributes for the mounted directory for the clients. For NFS Server Provisioner it can be achieved by adding `noac` option to the parameter `storageClass.mountOptions`. Please find more information [here](https://github.com/kubernetes-sigs/nfs-ganesha-server-and-external-provisioner/blob/master/charts/nfs-server-provisioner/values.yaml#L83). -### 3. Configure dependent charts +### 3. Deploy Redis -ONLYOFFICE Docs use redis by bitnami and ingress-nginx by kubernetes as dependencies charts. This bundle ingress-nginx+redis is used to implement balancing in sharded mode. You can manage the configuration of dependent charts, or disable them to use your dependencies. +To install Redis to your cluster, run the following command: -If you want to manage the configuration of dependent charts, please check section [#3.1](#31-configure-redisbitnami-subchart) for Redis and [#3.2](#32-configure-ingress-nginxkubernetes-subchart) for ingress-nginx controller +```bash +$ helm install redis bitnami/redis \ + --set architecture=standalone \ + --set master.persistence.storageClass=PERSISTENT_STORAGE_CLASS \ + --set master.persistence.size=8Gi \ + --set metrics.enabled=false +``` -(Optional) Also, you can use your own Redis or ingress-nginx controller, for more information please refer to step [#10](#10-deploy-onlyoffice-docs-with-your-own-dependency-optional) +Note: Set the `metrics.enabled=true` to enable exposing Redis metrics to be gathered by Prometheus. 
-#### 3.1 Configure redis/bitnami subchart +See more details about installing Redis via Helm [here](https://github.com/bitnami/charts/tree/main/bitnami/redis). -Redis/bitnami subchart is **enabled by default** +### 4. Configure dependent charts -Note: Set the `redis.metrics.enabled=true` to enable exposing Redis metrics to be gathered by Prometheus. +ONLYOFFICE Docs use ingress-nginx by kubernetes as dependencies chart. Bundle nginx-ingress+Redis is used to implement balancing in sharded mode. You can manage the configuration of dependent chart, or disable it to use your own nginx-ingress controller. -Some overridden values ​​for the Redis/Bitnami subchart can be found in the table below: +If you want to manage the configuration of ingress-nginx controller dependent chart, please check section [#4.1](#41-configure-ingress-nginxkubernetes-subchart) -### Redis subchart parameters +(Optional) Also, you can use your own ingress-nginx controller, for more information please refer to step [#10](#10-deploy-onlyoffice-docs-with-your-own-dependency-optional) -| Parameter | Description | Default | -|-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| -| `redis.master.persistence.size` | Persistent Volume size | `8Gi` | -| `redis.enabled` | Define that to enable or disable Redis/Bitnami subchart during deployment | `true` | -| `redis.architecture` | Redis® architecture. Allowed values: standalone or replication | `standalone` | -| `redis.secretAnnotations` | Annotations to add to secret. 
Some service annotations added for correct deployment along with the ONLYOFFICE Docs chart | `helm.sh/hook: pre-install helm.sh/hook-weight: "1"` | -| `redis.master.persistence.storageClass` | Persistent Volume storage class | `""` | -| `redis.metric.enabled` | Start a sidecar prometheus exporter to expose Redis® metrics | `false` | -| `redis.auth.password` | Redis® password | `""` | - -See more details about installing Redis via Helm [here](https://github.com/bitnami/charts/tree/main/bitnami/redis). - -#### 3.2 Configure ingress-nginx/kubernetes subchart +#### 4.1 Configure ingress-nginx/kubernetes subchart ingress-nginx/kubernetes subchart is **enabled by default** @@ -154,11 +146,11 @@ Some overridden values ​​for the ingress-nginx/Kubernetes subchart can be fo See more details about installing ingress-nginx via Helm [here](https://github.com/kubernetes/ingress-nginx/tree/main/charts/ingress-nginx). -### 4. Deploy StatsD exporter +### 5. Deploy StatsD exporter -*This step is optional. You can skip step [#4](#4-deploy-statsd-exporter) entirely if you don't want to run StatsD exporter* +*This step is optional. You can skip step [#5](#5-deploy-statsd-exporter) entirely if you don't want to run StatsD exporter* -#### 4.1 Add Helm repositories +#### 5.1 Add Helm repositories ```bash $ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts @@ -166,7 +158,7 @@ $ helm repo add kube-state-metrics https://kubernetes.github.io/kube-state-metri $ helm repo update ``` -#### 4.2 Installing Prometheus +#### 5.2 Installing Prometheus To install Prometheus to your cluster, run the following command: @@ -179,7 +171,7 @@ To change the scrape interval, specify the `server.global.scrape_interval` param See more details about installing Prometheus via Helm [here](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus). 
-#### 4.3 Installing StatsD exporter +#### 5.3 Installing StatsD exporter To install StatsD exporter to your cluster, run the following command: @@ -194,11 +186,11 @@ See more details about installing Prometheus StatsD exporter via Helm [here](htt To allow the StatsD metrics in ONLYOFFICE Docs, follow step [5.2](#52-metrics-deployment-optional) -### 5. Make changes to Node-config configuration files +### 6. Make changes to Node-config configuration files -*This step is optional. You can skip step [#5](#5-make-changes-to-node-config-configuration-files) entirely if you don't need to make changes to the configuration files* +*This step is optional. You can skip step [#6](#6-make-changes-to-node-config-configuration-files) entirely if you don't need to make changes to the configuration files* -#### 5.1 Create a ConfigMap containing a json file +#### 6.1 Create a ConfigMap containing a json file In order to create a ConfigMap from a file that contains the `production-linux-local.json` structure, you need to run the following command: @@ -209,33 +201,33 @@ $ kubectl create configmap custom-local-config \ Note: Any name except `local-config` can be used instead of `custom-local-config`. 
-#### 5.2 Specify parameters when installing ONLYOFFICE Docs +#### 6.2 Specify parameters when installing ONLYOFFICE Docs When installing ONLYOFFICE Docs, specify the `extraConf.configMap=custom-local-config` and `extraConf.filename=production-linux-local.json` parameters -Note: If you need to add a configuration file after the ONLYOFFICE Docs is already installed, you need to execute step [5.1](#51-create-a-configmap-containing-a-json-file) +Note: If you need to add a configuration file after the ONLYOFFICE Docs is already installed, you need to execute step [6.1](#61-create-a-configmap-containing-a-json-file) and then run the `helm upgrade documentserver onlyoffice/docs-shards --set extraConf.configMap=custom-local-config --set extraConf.filename=production-linux-local.json` command or `helm upgrade documentserver -f ./values.yaml onlyoffice/docs-shards` if the parameters are specified in the `values.yaml` file. -### 6. Add custom Fonts +### 7. Add custom Fonts -*This step is optional. You can skip step [#6](#6-add-custom-fonts) entirely if you don't need to add your fonts* +*This step is optional. You can skip step [#7](#7-add-custom-fonts) entirely if you don't need to add your fonts* In order to add fonts to images, you need to rebuild the images. Refer to the relevant steps in [this](https://github.com/ONLYOFFICE/Docker-Docs#building-onlyoffice-docs) manual. Then specify your images when installing the ONLYOFFICE Docs. -### 7. Add Plugins +### 8. Add Plugins -*This step is optional. You can skip step [#7](#7-add-plugins) entirely if you don't need to add plugins* +*This step is optional. You can skip step [#8](#8-add-plugins) entirely if you don't need to add plugins* In order to add plugins to images, you need to rebuild the images. Refer to the relevant steps in [this](https://github.com/ONLYOFFICE/Docker-Docs#building-onlyoffice-docs) manual. Then specify your images when installing the ONLYOFFICE Docs. -### 8. Change interface themes +### 9. 
Change interface themes -*This step is optional. You can skip step [#8](#8-change-interface-themes) entirely if you don't need to change the interface themes* +*This step is optional. You can skip step [#9](#9-change-interface-themes) entirely if you don't need to change the interface themes* -#### 8.1 Create a ConfigMap containing a json file +#### 9.1 Create a ConfigMap containing a json file To create a ConfigMap with a json file that contains the interface themes, you need to run the following command: @@ -246,11 +238,11 @@ $ kubectl create configmap custom-themes \ Note: Instead of `custom-themes` and `custom-themes.json` you can use any other names. -#### 8.2 Specify parameters when installing ONLYOFFICE Docs +#### 9.2 Specify parameters when installing ONLYOFFICE Docs When installing ONLYOFFICE Docs, specify the `extraThemes.configMap=custom-themes` and `extraThemes.filename=custom-themes.json` parameters. -Note: If you need to add interface themes after the ONLYOFFICE Docs is already installed, you need to execute step [5.1](#51-create-a-configmap-containing-a-json-file) +Note: If you need to add interface themes after the ONLYOFFICE Docs is already installed, you need to execute step [6.1](#61-create-a-configmap-containing-a-json-file) and then run the `helm upgrade documentserver onlyoffice/docs-shards --set extraThemes.configMap=custom-themes --set extraThemes.filename=custom-themes.json` command or `helm upgrade documentserver -f ./values.yaml onlyoffice/docs-shards` if the parameters are specified in the `values.yaml` file. @@ -733,31 +725,7 @@ You can further limit the access to the `info` page using Nginx [Basic Authentic ### 10. Deploy ONLYOFFICE Docs with your own dependency (optional) -### 10.1 Use your own Redis - -#### 10.1.1 Connect to Redis using password - -To use your own Redis, you need to disable the Redis/bitnami subchart during ONLYOFFICE Docs deployment and configure `connections.redis` options. 
- -For deploy ONLYOFFICE Docs and connect to existing Redis using `connections.redisHost` and `connections.redisPassword` follow the command: - -```bash -$ helm install documentserver onlyoffice/docs-shards --set redis.enabled=false --set connections.redisHost=YOUR_REDIS_HOST --set connections.redisPassword=YOUR_SECURE_PWD -``` - -Note: This command will create a secret with the password value that you set, and use the value from this secret to connect to Redis. - -#### 10.1.2 (Alternative) Connect to Redis using existing secret - -Alternative, you can create secret with the Redis password by yourself, and specify `connections.redisExistingSecret` parameter during deployment ONLYOFFICE Docs , for example: - -```bash -$ helm install documentserver onlyoffice/docs-shards --set redis.enabled=false --set connections.redisHost=YOUR_REDIS_HOST --set connections.redisExistingSecret=YOUR_SECRET_NAME -``` - -Note: In your own secret, the key that contains the password must be named `redis-password`. If this is not the case, add a parameter that will override key name in the secret with parameter `redisSecretKeyName`. - -### 10.2 Use your own nginx-ingress controller +### 10.1 Use your own nginx-ingress controller **Note:** ONLYOFFICE Docs support **only** nginx-ingress controller [by the kubernetes](https://github.com/kubernetes/ingress-nginx). 
diff --git a/templates/_helpers.tpl b/templates/_helpers.tpl index 157523b..5935718 100644 --- a/templates/_helpers.tpl +++ b/templates/_helpers.tpl @@ -4,10 +4,8 @@ Get the Redis password secret {{- define "ds.redis.secretName" -}} {{- if or .Values.connections.redisPassword .Values.connections.redisNoPass -}} {{- printf "%s-redis" .Release.Name -}} -{{- else if and (not .Values.redis.enabled) (.Values.connections.redisExistingSecret) -}} +{{- else if .Values.connections.redisExistingSecret -}} {{- printf "%s" (tpl .Values.connections.redisExistingSecret $) -}} -{{- else if and .Values.redis.enabled .Values.redis.auth.password -}} - {{- printf "%s-%s" .Release.Name (tpl .Values.connections.redisExistingSecret $) -}} {{- end -}} {{- end -}} @@ -20,22 +18,11 @@ Get the redis password {{- $keyValue := (get $secretKey .Values.connections.redisSecretKeyName) | b64dec }} {{- if .Values.connections.redisPassword -}} {{- printf "%s" .Values.connections.redisPassword -}} -{{- else if and .Values.redis.enabled .Values.redis.auth.password -}} - {{- printf "%s" .Values.redis.auth.password -}} {{- else if $keyValue -}} {{- printf "%s" $keyValue -}} {{- end -}} {{- end -}} -{{/* -Return ds release name prefix for redis host name if redis subchart was deployed -*/}} -{{- define "ds.redis.subchart.prefix" -}} -{{- if .Values.redis.enabled -}} - {{- printf "%s-" .Release.Name -}} -{{- end -}} -{{- end -}} - {{/* Return true if a secret object should be created for Redis */}} diff --git a/templates/configmaps/balancer-snippet.yaml b/templates/configmaps/balancer-snippet.yaml index 1effff7..2dfa0e1 100644 --- a/templates/configmaps/balancer-snippet.yaml +++ b/templates/configmaps/balancer-snippet.yaml @@ -140,7 +140,7 @@ data: if API_ARG then local API_KEY = handle_api_key(API_ARG) red:set_timeouts(1000, 1000, 1000) -- 1 sec - local ok, err = red:connect("{{ include "ds.redis.subchart.prefix" . 
}}{{ .Values.connections.redisHost }}", {{ .Values.connections.redisPort }}) + local ok, err = red:connect({{ .Values.connections.redisHost | quote }}, {{ .Values.connections.redisPort }}) if not ok then ngx.say("1: failed to connect: ",err) return diff --git a/values.yaml b/values.yaml index b00e78f..ebf7280 100644 --- a/values.yaml +++ b/values.yaml @@ -24,22 +24,6 @@ ingress-nginx: service: annotations: {} -## redis.enabled parameters for manage redis subchart condition -redis: - enabled: true - architecture: "standalone" - secretAnnotations: - helm.sh/hook: pre-install - helm.sh/hook-weight: "1" - master: - persistence: - size: 8Gi - storageClass: "" - metrics: - enabled: false - auth: - password: "" - ## Default values for Onlyoffice Docs ## product.name Specifies name of the product From ffd8492a3e7388d8de5821aadaf8d7f6631a5cee Mon Sep 17 00:00:00 2001 From: kireevdmitry Date: Mon, 5 Aug 2024 13:15:36 +0000 Subject: [PATCH 04/26] Add subcharts loading --- .github/workflows/4testing_repo.yaml | 1 + .github/workflows/stable_repo.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/workflows/4testing_repo.yaml b/.github/workflows/4testing_repo.yaml index 24120eb..b15b0dd 100644 --- a/.github/workflows/4testing_repo.yaml +++ b/.github/workflows/4testing_repo.yaml @@ -35,6 +35,7 @@ jobs: with: repo: ${{ secrets.AWS_BUCKET_URL }}/charts/4testing chart: ./ + packageExtraArgs: --dependency-update - name: Reindex index.yaml run: | diff --git a/.github/workflows/stable_repo.yaml b/.github/workflows/stable_repo.yaml index 3e9494a..4bc5305 100644 --- a/.github/workflows/stable_repo.yaml +++ b/.github/workflows/stable_repo.yaml @@ -24,6 +24,7 @@ jobs: with: repo: ${{ secrets.AWS_BUCKET_URL }}/charts/stable chart: ./ + packageExtraArgs: --dependency-update - name: Reindex index.yaml run: | From c1ee9cac793008929a7bc2a2a6e5bc76e07c5817 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Mon, 5 Aug 2024 16:26:49 
+0300 Subject: [PATCH 05/26] Small fix Delete some points about the Redis subchart --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index cb85379..b0f87a7 100644 --- a/README.md +++ b/README.md @@ -275,7 +275,7 @@ Note: If you need to add license after the ONLYOFFICE Docs is already installed, To deploy ONLYOFFICE Docs with the release name `documentserver`: ```bash -$ helm install documentserver onlyoffice/docs-shards --set redis.master.persistence.storageClass=PERSISTENT_STORAGE_CLASS +$ helm install documentserver onlyoffice/docs-shards ``` The command deploys ONLYOFFICE Docs on the Kubernetes cluster in the default configuration. The [Parameters](#4-parameters) section lists the parameters that can be configured during installation. @@ -303,7 +303,7 @@ The `helm delete` command removes all the Kubernetes components associated with | `connections.redisDBNum` | Number of the redis logical database to be [selected](https://redis.io/commands/select/). The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file | `0` | | `connections.redisClusterNodes` | List of nodes in the Redis cluster. There is no need to specify every node in the cluster, 3 should be enough. You can specify multiple values. It must be specified in the `host:port` format | `[]` | | `connections.redisSentinelGroupName` | Name of a group of Redis instances composed of a master and one or more slaves. Used if `connections.redisConnectorName` is set to `ioredis` | `mymaster` | -| `connections.redisPassword` | The password set for the Redis account. If set to, it takes priority over the `connections.redisExistingSecret` and `redis.auth.password`. The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file| `""` | +| `connections.redisPassword` | The password set for the Redis account. 
If set to, it takes priority over the `connections.redisExistingSecret`. The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file| `""` | | `connections.redisSecretKeyName` | The name of the key that contains the Redis user password | `redis-password` | | `connections.redisExistingSecret` | Name of existing secret to use for Redis passwords. Must contain the key specified in `connections.redisSecretKeyName`. The password from this secret overrides password set in the `options` object in `local.json` | `redis` | | `connections.redisNoPass` | Defines whether to use a Redis auth without a password. If the connection to Redis server does not require a password, set the value to `true` | `false` | @@ -758,7 +758,7 @@ $ helm upgrade ingress-nginx --repo https://kubernetes.gi **Now**, when your nginx-ingress controller if configure, you can deploy ONLYOFFICE Docs with command: ```bash -$ helm install docs onlyoffice/docs-shards --set ingress-nginx.enabled=false --set redis.master.persistence.storageClass=PERSISTENT_STORAGE_CLASS --set redis.auth.password= +$ helm install docs onlyoffice/docs-shards --set ingress-nginx.enabled=false ``` ## Using Grafana to visualize metrics (optional) From 130e4225c08243ce98c059a14d7c76e914a4fcd8 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Mon, 5 Aug 2024 16:32:33 +0300 Subject: [PATCH 06/26] Small links fixes --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index b0f87a7..a4b9327 100644 --- a/README.md +++ b/README.md @@ -832,11 +832,11 @@ See more details about installing Grafana via Helm [here](https://github.com/bit ### 2 Access to Grafana via Ingress -Note: It is assumed that step [#5.3.2.1](#5321-installing-the-kubernetes-nginx-ingress-controller) has already been completed. 
+Note: It is assumed that step, please make sure that the nginx-ingress controller is installed in your cluster. If you already deploy ONLYOFFICE Docs and did not turn off the controller with the parameter `ingress-nginx.enabled=false` it is already present in the cluster. If ONLYOFFICE Docs was installed with the parameter `grafana.ingress.enabled` (step [#5.2](#52-metrics-deployment-optional)) then access to Grafana will be at: `http://INGRESS-ADDRESS/grafana/` -If Ingres was installed using a secure connection (step [#5.3.2.3](#5323-expose-onlyoffice-docs-via-https)), then access to Grafana will be at: `https://your-domain-name/grafana/` +If Ingres was installed using a secure connection (step [#5.3](#53-expose-onlyoffice-docs-via-https)), then access to Grafana will be at: `https://your-domain-name/grafana/` ### 3. View gathered metrics in Grafana From ae763e42823365618d0c89f7ac6d5e53936fb128 Mon Sep 17 00:00:00 2001 From: danilapog Date: Mon, 5 Aug 2024 16:35:03 +0300 Subject: [PATCH 07/26] Remove call deleted function --- templates/configmaps/documentserver.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/configmaps/documentserver.yaml b/templates/configmaps/documentserver.yaml index 9da380c..2ee4cc9 100644 --- a/templates/configmaps/documentserver.yaml +++ b/templates/configmaps/documentserver.yaml @@ -17,7 +17,7 @@ data: DB_PORT: "5432" DB_NAME: "postgres" REDIS_CONNECTOR_NAME: {{ .Values.connections.redisConnectorName }} - REDIS_SERVER_HOST: {{ include "ds.redis.subchart.prefix" . 
}}{{ .Values.connections.redisHost }} + REDIS_SERVER_HOST: {{ .Values.connections.redisHost }} REDIS_SERVER_PORT: {{ .Values.connections.redisPort | quote }} REDIS_SERVER_USER: {{ .Values.connections.redisUser }} REDIS_SERVER_DB_NUM: {{ .Values.connections.redisDBNum | quote }} From f8d09642fa894ea5c5d28ec28980010f1acba92a Mon Sep 17 00:00:00 2001 From: kireevdmitry Date: Mon, 5 Aug 2024 13:39:58 +0000 Subject: [PATCH 08/26] Fix getting chart version --- .github/workflows/4testing_repo.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/4testing_repo.yaml b/.github/workflows/4testing_repo.yaml index b15b0dd..154932d 100644 --- a/.github/workflows/4testing_repo.yaml +++ b/.github/workflows/4testing_repo.yaml @@ -15,7 +15,7 @@ jobs: run: | wget https://download.onlyoffice.com/charts/4testing/index.yaml -P /tmp LATEST_VERSION=$(awk '/docs-shards:/{f=1};f{print}' /tmp/index.yaml | awk '/version:/ {print $2;}' | head -1) - NEW_VERSION=$(awk '/version:/ {print $2;}' Chart.yaml) + NEW_VERSION=$(awk '/version:/ {print $2;}' Chart.yaml | head -1) if [[ "$LATEST_VERSION" == *"$NEW_VERSION"* ]]; then RC=${LATEST_VERSION: -1} let "RC+=1" From a9aad7a0444acb24e9741de9e3a14dc7d2d366a7 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Mon, 5 Aug 2024 16:49:07 +0300 Subject: [PATCH 09/26] Fix the link to a broken section list item --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index a4b9327..e791839 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ ONLYOFFICE Docs for Kubernetes + [5.1 Add Helm repositories](#51-add-helm-repositories) + [5.2 Installing Prometheus](#52-installing-prometheus) + [5.3 Installing StatsD exporter](#53-installing-statsd-exporter) - * [6. Make changes to Node-config configuration files](65-make-changes-to-Node-config-configuration-files) + * [6. 
Make changes to Node-config configuration files](#6-make-changes-to-Node-config-configuration-files) + [6.1 Create a ConfigMap containing a json file](#61-create-a-configmap-containing-a-json-file) + [6.2 Specify parameters when installing ONLYOFFICE Docs](#62-specify-parameters-when-installing-onlyoffice-docs) * [7. Add custom Fonts](#7-add-custom-fonts) From 87e953ce72251cd5c294f13fefa52bb2cf91119d Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Tue, 6 Aug 2024 11:44:59 +0300 Subject: [PATCH 10/26] Fix typo in `common parameters` Also refactoring own/exist ingress-nginx configuration --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index e791839..95ad9fd 100644 --- a/README.md +++ b/README.md @@ -297,7 +297,7 @@ The `helm delete` command removes all the Kubernetes components associated with | Parameter | Description | Default | |-------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------| | `connections.redisConnectorName` | Defines which connector to use to connect to Redis. If you need to connect to Redis Sentinel, set the value `ioredis` | `redis` | -| `connections.redistHost` | The IP address or the name of the Redis host | `redis-master` | +| `connections.redisHost` | The IP address or the name of the Redis host | `redis-master.default.svc.cluster.local` | | `connections.redisPort` | The Redis server port number | `6379` | | `connections.redisUser` | The Redis [user](https://redis.io/docs/management/security/acl/) name. 
The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file | `default` | | `connections.redisDBNum` | Number of the redis logical database to be [selected](https://redis.io/commands/select/). The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file | `0` | @@ -740,7 +740,7 @@ If you want to deploy ONLYOFFICE Docs in cluster where already exist nginx-ingre > All available Redis connections parameters present [here](#4-parameters) with the `connections.` prefix ```bash -helm template docs onlyoffice/docs-shards --set connections.redisPassword= --set documentserver.ingressCustomConfigMapsNamespace= --show-only templates/configmaps/balancer-snippet.yaml --show-only templates/configmaps/balancer-lua.yaml --dry-run=client > ./ingressConfigMaps.yaml +helm template docs onlyoffice/docs-shards --set documentserver.ingressCustomConfigMapsNamespace= --show-only templates/configmaps/balancer-snippet.yaml --show-only templates/configmaps/balancer-lua.yaml --dry-run=client > ./ingressConfigMaps.yaml ``` **The second step**, apply configMaps that you create with command below: From 35338c6667f6ad932baadbd0c9e7f020abe0f4d4 Mon Sep 17 00:00:00 2001 From: VyacheslavSemin Date: Tue, 6 Aug 2024 11:43:48 +0000 Subject: [PATCH 11/26] Fix the default values --- Chart.yaml | 2 +- values.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Chart.yaml b/Chart.yaml index 6e67469..47adb23 100644 --- a/Chart.yaml +++ b/Chart.yaml @@ -10,6 +10,6 @@ appVersion: 8.1.1 dependencies: - name: ingress-nginx - version: 4.9.0 + version: 4.11.1 repository: https://kubernetes.github.io/ingress-nginx condition: ingress-nginx.enabled diff --git a/values.yaml b/values.yaml index ebf7280..456887c 100644 --- a/values.yaml +++ b/values.yaml @@ -213,7 +213,7 @@ license: log: ## log.level Defines the type and severity of a logged event ## Possible 
values are `ALL`, `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`, `FATAL`, `MARK`, `OFF` - level: ALL + level: WARN ## log.type Defines the format of a logged event ## Possible values are `pattern`, `json`, `basic`, `coloured`, `messagePassThrough`, `dummy` type: pattern @@ -595,7 +595,7 @@ documentserver: ## ref: https://nginx.org/en/docs/http/ngx_http_log_module.html#access_log ## Example: ## accessLog: "main" - accessLog: "main" + accessLog: "off" ## documentserver.proxy.gzipProxied Defines the nginx config gzip_proxied directive ## ref: https://nginx.org/en/docs/http/ngx_http_gzip_module.html#gzip_proxied gzipProxied: "off" From 30d88976a1fdc61da69a1aca405d8bf1c9b72c27 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Tue, 6 Aug 2024 15:43:16 +0300 Subject: [PATCH 12/26] Add update instructions --- README.md | 46 +++++++++++++++++++++++++++++++++------------- 1 file changed, 33 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 95ad9fd..d44dc65 100644 --- a/README.md +++ b/README.md @@ -34,12 +34,13 @@ ONLYOFFICE Docs for Kubernetes * [5.3 Expose ONLYOFFICE Docs via HTTPS](#53-expose-onlyoffice-docs-via-https) * [6. Scale ONLYOFFICE Docs (optional)](#6-scale-onlyoffice-docs-optional) + [6.1 Horizontal Pod Autoscaling](#61-horizontal-pod-autoscaling) - + [6.2 Manual scaling](#62-manual-scaling) - * [7. Update ONLYOFFICE Docs license (optional)](#7-update-onlyoffice-docs-license-optional) - * [8. ONLYOFFICE Docs installation test (optional)](#8-onlyoffice-docs-installation-test-optional) - * [9. Access to the info page (optional)](#9-access-to-the-info-page-optional) - * [10. Deploy ONLYOFFICE Docs with your own dependency (optional)](#10-deploy-onlyoffice-docs-with-your-own-dependency-optional) - * [10.1 Use your own nginx-ingress controller](#101-use-your-own-nginx-ingress-controller) + + [6.2 Manual scaling](#62-manual-scaling) + * [7. Update ONLYOFFICE Docs](#7-update-onlyoffice-docs) + * [8. 
Update ONLYOFFICE Docs license (optional)](#8-update-onlyoffice-docs-license-optional) + * [9. ONLYOFFICE Docs installation test (optional)](#9-onlyoffice-docs-installation-test-optional) + * [10. Access to the info page (optional)](#10-access-to-the-info-page-optional) + * [11. Deploy ONLYOFFICE Docs with your own dependency (optional)](#11-deploy-onlyoffice-docs-with-your-own-dependency-optional) + * [11.1 Use your own nginx-ingress controller](#111-use-your-own-nginx-ingress-controller) - [Using Grafana to visualize metrics (optional)](#using-grafana-to-visualize-metrics-optional) * [1. Deploy Grafana](#1-deploy-grafana) + [1.1 Deploy Grafana without installing ready-made dashboards](#11-deploy-grafana-without-installing-ready-made-dashboards) @@ -122,7 +123,7 @@ ONLYOFFICE Docs use ingress-nginx by kubernetes as dependencies chart. Bundle ng If you want to manage the configuration of ingress-nginx controller dependent chart, please check section [#4.1](#41-configure-ingress-nginxkubernetes-subchart) -(Optional) Also, you can use your own ingress-nginx controller, for more information please refer to step [#10](#10-deploy-onlyoffice-docs-with-your-own-dependency-optional) +(Optional) Also, you can use your own ingress-nginx controller, for more information please refer to step [#11](#11-deploy-onlyoffice-docs-with-your-own-dependency-optional) #### 4.1 Configure ingress-nginx/kubernetes subchart @@ -421,7 +422,7 @@ The `helm delete` command removes all the Kubernetes components associated with | `documentserver.proxy.workerProcesses` | Defines the nginx config worker_processes directive | `1` | | `documentserver.proxy.secureLinkSecret` | Defines secret for the nginx config directive [secure_link_md5](https://nginx.org/en/docs/http/ngx_http_secure_link_module.html#secure_link_md5) | `verysecretstring` | | `documentserver.proxy.infoAllowedIP` | Defines ip addresses for accessing the info page | `[]` | -| `documentserver.proxy.infoAllowedUser` | Defines user name 
for accessing the info page. If not set to, Nginx [Basic Authentication](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) will not be applied to access the info page. For more details, see [here](#12-access-to-the-info-page-optional) | `""` | +| `documentserver.proxy.infoAllowedUser` | Defines user name for accessing the info page. If not set to, Nginx [Basic Authentication](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) will not be applied to access the info page. For more details, see [here](#10-access-to-the-info-page-optional) | `""` | | `documentserver.proxy.infoAllowedPassword` | Defines user password for accessing the info page. Used if `proxy.infoAllowedUser` is set | `password` | | `documentserver.proxy.infoAllowedSecretKeyName` | The name of the key that contains the info auth user password. Used if `proxy.infoAllowedUser` is set | `info-auth-password` | | `documentserver.proxy.infoAllowedExistingSecret` | Name of existing secret to use for info auth password. Used if `proxy.infoAllowedUser` is set. Must contain the key specified in `proxy.infoAllowedSecretKeyName`. If set to, it takes priority over the `proxy.infoAllowedPassword` | `""` | @@ -668,7 +669,26 @@ $ kubectl scale -n default deployment documentserver --replicas=POD_COUNT where `POD_COUNT` is a number of the `documentserver` pods. -### 7. Update ONLYOFFICE Docs license (optional) +### 7. Update ONLYOFFICE Docs + +It's necessary to set the parameters for updating. For example, + +```bash +$ helm upgrade documentserver onlyoffice/docs \ + --set docservice.image.tag=[version] +``` + + > **Note**: also need to specify the parameters that were specified during installation + +Or modify the values.yaml file and run the command: + +```bash +$ helm upgrade documentserver -f values.yaml onlyoffice/docs +``` + +When the `helm upgrade` command is executed, all active documents will be forced closed and saved. 
During the period specified in the `documentserver.terminationGracePeriodSeconds` parameter, the balancing tables in Redis will be cleaned. + +### 8. Update ONLYOFFICE Docs license (optional) In order to update the license, you need to perform the following steps: - Place the license.lic file containing the new key in some directory @@ -682,7 +702,7 @@ $ kubectl create secret generic license --from-file=path/to/license.lic -n ``` -### 8. ONLYOFFICE Docs installation test (optional) +### 9. ONLYOFFICE Docs installation test (optional) You can test ONLYOFFICE Docs availability and access to connected dependencies by running the following command: @@ -714,7 +734,7 @@ Note: This testing is for informational purposes only and cannot guarantee 100% It may be that even though all checks are completed successfully, an error occurs in the application. In this case, more detailed information can be found in the application logs. -### 9. Access to the info page (optional) +### 10. Access to the info page (optional) The access to `/info` page is limited by default. In order to allow the access to it, you need to specify the IP addresses or subnets (that will be Proxy container clients in this case) using `proxy.infoAllowedIP` parameter. @@ -723,9 +743,9 @@ Generally the Pods / Nodes / Load Balancer addresses will actually be the client In this case the access to the info page will be available to everyone. You can further limit the access to the `info` page using Nginx [Basic Authentication](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) which you can turn on by setting `proxy.infoAllowedUser` parameter value and by setting the password using `proxy.infoAllowedPassword` parameter, alternatively you can use the existing secret with password by setting its name with `proxy.infoAllowedExistingSecret` parameter. -### 10. Deploy ONLYOFFICE Docs with your own dependency (optional) +### 11. 
Deploy ONLYOFFICE Docs with your own dependency (optional) -### 10.1 Use your own nginx-ingress controller +### 11.1 Use your own nginx-ingress controller **Note:** ONLYOFFICE Docs support **only** nginx-ingress controller [by the kubernetes](https://github.com/kubernetes/ingress-nginx). From 547854abc7295cea144226434ce4c1695a97b495 Mon Sep 17 00:00:00 2001 From: kireevdmitry Date: Tue, 6 Aug 2024 12:57:45 +0000 Subject: [PATCH 13/26] Fix setting chart version --- .github/workflows/4testing_repo.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/4testing_repo.yaml b/.github/workflows/4testing_repo.yaml index 154932d..218e98d 100644 --- a/.github/workflows/4testing_repo.yaml +++ b/.github/workflows/4testing_repo.yaml @@ -23,7 +23,7 @@ jobs: RC='1' fi NEW_VERSION=$(echo $NEW_VERSION)-rc$RC - sed 's/\(version:\).*/\1 '$NEW_VERSION'/' -i Chart.yaml + sed '0,/version/s/\(version:\).*/\1 '$NEW_VERSION'/' -i Chart.yaml - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v1 with: From 70cdc3515bde12f8efbf820764c358f7c3e69129 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Tue, 6 Aug 2024 16:37:58 +0300 Subject: [PATCH 14/26] Fix typo --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index d44dc65..0f12959 100644 --- a/README.md +++ b/README.md @@ -674,7 +674,7 @@ where `POD_COUNT` is a number of the `documentserver` pods. It's necessary to set the parameters for updating. 
For example, ```bash -$ helm upgrade documentserver onlyoffice/docs \ +$ helm upgrade documentserver onlyoffice/docs-shards \ --set docservice.image.tag=[version] ``` @@ -683,7 +683,7 @@ $ helm upgrade documentserver onlyoffice/docs \ Or modify the values.yaml file and run the command: ```bash -$ helm upgrade documentserver -f values.yaml onlyoffice/docs +$ helm upgrade documentserver -f values.yaml onlyoffice/docs-shards ``` When the `helm upgrade` command is executed, all active documents will be forced closed and saved. During the period specified in the `documentserver.terminationGracePeriodSeconds` parameter, the balancing tables in Redis will be cleaned. From e2b159b05125857e920120699c3485091907625a Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Tue, 6 Aug 2024 16:47:01 +0300 Subject: [PATCH 15/26] Refactoring update instructions --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0f12959..f0ad825 100644 --- a/README.md +++ b/README.md @@ -686,7 +686,7 @@ Or modify the values.yaml file and run the command: $ helm upgrade documentserver -f values.yaml onlyoffice/docs-shards ``` -When the `helm upgrade` command is executed, all active documents will be forced closed and saved. During the period specified in the `documentserver.terminationGracePeriodSeconds` parameter, the balancing tables in Redis will be cleaned. +When the `helm upgrade` command is executed, replicas will be turned off one by one, and all active documents will be forced closed and saved. Also, disabled replicas will be removed from the Redis balancing tables. ### 8. 
Update ONLYOFFICE Docs license (optional) From 56b26612e49b3ccc4a3455a8c8e19da21b6f7ffa Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Tue, 6 Aug 2024 17:10:45 +0300 Subject: [PATCH 16/26] Refactoring update instructions --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f0ad825..0d61199 100644 --- a/README.md +++ b/README.md @@ -686,7 +686,7 @@ Or modify the values.yaml file and run the command: $ helm upgrade documentserver -f values.yaml onlyoffice/docs-shards ``` -When the `helm upgrade` command is executed, replicas will be turned off one by one, and all active documents will be forced closed and saved. Also, disabled replicas will be removed from the Redis balancing tables. +When the `helm upgrade` command is executed, replicas will be turned off one by one, and active documents on disabled replicas will be forced closed and saved. Also, disabled replicas will be removed from the Redis balancing tables. ### 8. Update ONLYOFFICE Docs license (optional) From 7f31f13c3a4ba7375d5c840929e12c564013aa92 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Tue, 6 Aug 2024 17:24:10 +0300 Subject: [PATCH 17/26] Fix local config name --- README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 95ad9fd..8827312 100644 --- a/README.md +++ b/README.md @@ -299,13 +299,13 @@ The `helm delete` command removes all the Kubernetes components associated with | `connections.redisConnectorName` | Defines which connector to use to connect to Redis. 
If you need to connect to Redis Sentinel, set the value `ioredis` | `redis` | | `connections.redisHost` | The IP address or the name of the Redis host | `redis-master.default.svc.cluster.local` | | `connections.redisPort` | The Redis server port number | `6379` | -| `connections.redisUser` | The Redis [user](https://redis.io/docs/management/security/acl/) name. The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file | `default` | -| `connections.redisDBNum` | Number of the redis logical database to be [selected](https://redis.io/commands/select/). The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file | `0` | +| `connections.redisUser` | The Redis [user](https://redis.io/docs/management/security/acl/) name. The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file | `default` | +| `connections.redisDBNum` | Number of the redis logical database to be [selected](https://redis.io/commands/select/). The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file | `0` | | `connections.redisClusterNodes` | List of nodes in the Redis cluster. There is no need to specify every node in the cluster, 3 should be enough. You can specify multiple values. It must be specified in the `host:port` format | `[]` | | `connections.redisSentinelGroupName` | Name of a group of Redis instances composed of a master and one or more slaves. Used if `connections.redisConnectorName` is set to `ioredis` | `mymaster` | -| `connections.redisPassword` | The password set for the Redis account. If set to, it takes priority over the `connections.redisExistingSecret`. 
The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file| `""` | +| `connections.redisPassword` | The password set for the Redis account. If set to, it takes priority over the `connections.redisExistingSecret`. The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file| `""` | | `connections.redisSecretKeyName` | The name of the key that contains the Redis user password | `redis-password` | -| `connections.redisExistingSecret` | Name of existing secret to use for Redis passwords. Must contain the key specified in `connections.redisSecretKeyName`. The password from this secret overrides password set in the `options` object in `local.json` | `redis` | +| `connections.redisExistingSecret` | Name of existing secret to use for Redis passwords. Must contain the key specified in `connections.redisSecretKeyName`. The password from this secret overrides password set in the `options` object in `production-linux-local.json` | `redis` | | `connections.redisNoPass` | Defines whether to use a Redis auth without a password. 
If the connection to Redis server does not require a password, set the value to `true` | `false` | | `webProxy.enabled` | Specify whether a Web proxy is used in your network to access the Pods of k8s cluster to the Internet | `false` | | `webProxy.http` | Web Proxy address for `HTTP` traffic | `http://proxy.example.com` | From be94648fe97a4a7fa6699f87cdc69430eeb760e5 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Tue, 6 Aug 2024 17:25:32 +0300 Subject: [PATCH 18/26] Fix local config name in values --- values.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/values.yaml b/values.yaml index 456887c..533c071 100644 --- a/values.yaml +++ b/values.yaml @@ -49,11 +49,11 @@ connections: redisPort: "6379" ## connections.redisUser The Redis user name ## ref: https://redis.io/docs/management/security/acl/ - ## The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file + ## The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file redisUser: default ## connections.redisDBNum Number of the redis logical database to be selected ## ref: https://redis.io/commands/select/ - ## The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file + ## The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file ## Not used if values are set in `connections.redisClusterNodes` redisDBNum: "0" ## connections.redisClusterNodes List of nodes in the Redis cluster @@ -70,14 +70,14 @@ connections: redisSentinelGroupName: mymaster ## connections.redisExistingSecret Name of existing secret to use for Redis passwords ## Must contain the key specified in `connections.redisSecretKeyName` - ## The password from this 
secret overrides the value for the password set in the `options` object in `local.json` if you add custom configuration file + ## The password from this secret overrides the value for the password set in the `options` object in `production-linux-local.json` if you add custom configuration file redisExistingSecret: redis ## connections.redisSecretKeyName The name of the key that contains the Redis user password ## If you set a password in `redisPassword`, a secret will be automatically created, the key name of which will be the value set here redisSecretKeyName: redis-password ## connections.redisPassword The password set for the Redis account ## If set to, it takes priority over the `connections.redisExistingSecret` - ## The value in this parameter overrides the value set in the `options` object in `local.json` if you add custom configuration file + ## The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file redisPassword: "" ## connections.redisNoPass Defines whether to use a Redis auth without a password ## If the connection to Redis server does not require a password, set the value to `true` From 87d44e0c011a3a242e7b6fac4a45474cf92778d7 Mon Sep 17 00:00:00 2001 From: VyacheslavSemin Date: Tue, 6 Aug 2024 15:16:32 +0000 Subject: [PATCH 19/26] Add the ability to change the number of replicas being updated --- README.md | 2 ++ templates/_helpers.tpl | 11 +++++++++++ templates/deployments/documentserver.yaml | 2 +- values.yaml | 13 ++++++++++--- 4 files changed, 24 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index c9baa24..7ec7bc7 100644 --- a/README.md +++ b/README.md @@ -372,6 +372,8 @@ The `helm delete` command removes all the Kubernetes components associated with | `documentserver.podAnnotations` | Map of annotations to add to the Documentserver deployment pods | `rollme: "{{ randAlphaNum 5 | quote }}"` | | `documentserver.replicas` | Number of 
Documentserver replicas to deploy. If the `documentserver.autoscaling.enabled` parameter is enabled, it is ignored. | `3` | | `documentserver.updateStrategy.type` | Documentserver deployment update strategy type | `RollingUpdate` | +| `documentserver.updateStrategy.rollingUpdate.maxUnavailable` | Maximum number of Documentserver Pods unavailable during the update process | `25%` | +| `documentserver.updateStrategy.rollingUpdate.maxSurge` | Maximum number of Documentserver Pods created over the desired number of Pods | `25%` | | `documentserver.customPodAntiAffinity` | Prohibiting the scheduling of Documentserver Pods relative to other Pods containing the specified labels on the same node | `{}` | | `documentserver.podAffinity` | Pod affinity rules for Documentserver Pods scheduling by nodes relative to other Pods | `{}` | | `documentserver.nodeAffinity` | Node affinity rules for Documentserver Pods scheduling by nodes | `{}` | diff --git a/templates/_helpers.tpl b/templates/_helpers.tpl index 5935718..bdf1b0e 100644 --- a/templates/_helpers.tpl +++ b/templates/_helpers.tpl @@ -177,6 +177,17 @@ Get the ds annotations {{- end }} {{- end -}} +{{/* +Get the update strategy type for ds +*/}} +{{- define "ds.update.strategyType" -}} +{{- if eq .type "RollingUpdate" -}} + {{- toYaml . | nindent 4 -}} +{{- else -}} + {{- omit . "rollingUpdate" | toYaml | nindent 4 -}} +{{- end -}} +{{- end -}} + {{/* Get the ds Service Account name */}} diff --git a/templates/deployments/documentserver.yaml b/templates/deployments/documentserver.yaml index 13c7bb5..045734d 100644 --- a/templates/deployments/documentserver.yaml +++ b/templates/deployments/documentserver.yaml @@ -23,7 +23,7 @@ spec: {{- include "ds.labels.commonLabels" . 
| trim | nindent 6 }} {{- end }} {{- if .Values.documentserver.updateStrategy }} - strategy: {{- toYaml .Values.documentserver.updateStrategy | nindent 4 }} + strategy: {{- include "ds.update.strategyType" .Values.documentserver.updateStrategy }} {{- end }} template: metadata: diff --git a/values.yaml b/values.yaml index 533c071..3e7d5b6 100644 --- a/values.yaml +++ b/values.yaml @@ -327,11 +327,18 @@ documentserver: ## documentserver.replicas Number of Documentserver replicas to deploy ## If the `documentserver.autoscaling.enabled` parameter is enabled, it is ignored replicas: 3 - ## Update strategy used to replace old Pods by new ones. Allowed values: `RollingUpdate` or `Recreate` - ## It is recommended to use the `RollingUpdate` type - ## docservice.updateStrategy.type Docservice deployment update strategy type + ## Update strategy used to replace old Pods by new ones updateStrategy: + ## documentserver.updateStrategy.type Documentserver deployment update strategy type + ## Allowed values: `RollingUpdate` or `Recreate` + ## It is recommended to use the `RollingUpdate` type type: RollingUpdate + # documentserver.updateStrategy.rollingUpdate Used only when `documentserver.updateStrategy.type=RollingUpdate` + rollingUpdate: + # documentserver.updateStrategy.rollingUpdate.maxUnavailable Maximum number of Documentserver Pods unavailable during the update process + maxUnavailable: 25% + # documentserver.updateStrategy.rollingUpdate.maxSurge Maximum number of Documentserver Pods created over the desired number of Pods + maxSurge: 25% ## documentserver.customPodAntiAffinity Prohibiting the scheduling of Documentserver Pods relative to other Pods containing the specified labels on the same node ## Example: ## customPodAntiAffinity: From 364a859ffb8640063687ca19a1a2b89a4618b540 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Thu, 8 Aug 2024 17:48:12 +0300 Subject: [PATCH 20/26] Rename local config --- README.md | 18 
+++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 7ec7bc7..3c59cae 100644 --- a/README.md +++ b/README.md @@ -193,21 +193,21 @@ To allow the StatsD metrics in ONLYOFFICE Docs, follow step [5.2](#52-metrics-de #### 6.1 Create a ConfigMap containing a json file -In order to create a ConfigMap from a file that contains the `production-linux-local.json` structure, you need to run the following command: +In order to create a ConfigMap from a file that contains the `local-production-linux.json` structure, you need to run the following command: ```bash $ kubectl create configmap custom-local-config \ - --from-file=./production-linux-local.json + --from-file=./local-production-linux.json ``` Note: Any name except `local-config` can be used instead of `custom-local-config`. #### 6.2 Specify parameters when installing ONLYOFFICE Docs -When installing ONLYOFFICE Docs, specify the `extraConf.configMap=custom-local-config` and `extraConf.filename=production-linux-local.json` parameters +When installing ONLYOFFICE Docs, specify the `extraConf.configMap=custom-local-config` and `extraConf.filename=local-production-linux.json` parameters Note: If you need to add a configuration file after the ONLYOFFICE Docs is already installed, you need to execute step [6.1](#61-create-a-configmap-containing-a-json-file) -and then run the `helm upgrade documentserver onlyoffice/docs-shards --set extraConf.configMap=custom-local-config --set extraConf.filename=production-linux-local.json` command or +and then run the `helm upgrade documentserver onlyoffice/docs-shards --set extraConf.configMap=custom-local-config --set extraConf.filename=local-production-linux.json` command or `helm upgrade documentserver -f ./values.yaml onlyoffice/docs-shards` if the parameters are specified in the `values.yaml` file. ### 7. 
Add custom Fonts @@ -300,13 +300,13 @@ The `helm delete` command removes all the Kubernetes components associated with | `connections.redisConnectorName` | Defines which connector to use to connect to Redis. If you need to connect to Redis Sentinel, set the value `ioredis` | `redis` | | `connections.redisHost` | The IP address or the name of the Redis host | `redis-master.default.svc.cluster.local` | | `connections.redisPort` | The Redis server port number | `6379` | -| `connections.redisUser` | The Redis [user](https://redis.io/docs/management/security/acl/) name. The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file | `default` | -| `connections.redisDBNum` | Number of the redis logical database to be [selected](https://redis.io/commands/select/). The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file | `0` | +| `connections.redisUser` | The Redis [user](https://redis.io/docs/management/security/acl/) name. The value in this parameter overrides the value set in the `options` object in `local-production-linux.json` if you add custom configuration file | `default` | +| `connections.redisDBNum` | Number of the redis logical database to be [selected](https://redis.io/commands/select/). The value in this parameter overrides the value set in the `options` object in `local-production-linux.json` if you add custom configuration file | `0` | | `connections.redisClusterNodes` | List of nodes in the Redis cluster. There is no need to specify every node in the cluster, 3 should be enough. You can specify multiple values. It must be specified in the `host:port` format | `[]` | | `connections.redisSentinelGroupName` | Name of a group of Redis instances composed of a master and one or more slaves. 
Used if `connections.redisConnectorName` is set to `ioredis` | `mymaster` | -| `connections.redisPassword` | The password set for the Redis account. If set to, it takes priority over the `connections.redisExistingSecret`. The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file| `""` | +| `connections.redisPassword` | The password set for the Redis account. If set to, it takes priority over the `connections.redisExistingSecret`. The value in this parameter overrides the value set in the `options` object in `local-production-linux.json` if you add custom configuration file| `""` | | `connections.redisSecretKeyName` | The name of the key that contains the Redis user password | `redis-password` | -| `connections.redisExistingSecret` | Name of existing secret to use for Redis passwords. Must contain the key specified in `connections.redisSecretKeyName`. The password from this secret overrides password set in the `options` object in `production-linux-local.json` | `redis` | +| `connections.redisExistingSecret` | Name of existing secret to use for Redis passwords. Must contain the key specified in `connections.redisSecretKeyName`. The password from this secret overrides password set in the `options` object in `local-production-linux.json` | `redis` | | `connections.redisNoPass` | Defines whether to use a Redis auth without a password. If the connection to Redis server does not require a password, set the value to `true` | `false` | | `webProxy.enabled` | Specify whether a Web proxy is used in your network to access the Pods of k8s cluster to the Internet | `false` | | `webProxy.http` | Web Proxy address for `HTTP` traffic | `http://proxy.example.com` | @@ -356,7 +356,7 @@ The `helm delete` command removes all the Kubernetes components associated with | `jwt.outbox` | JSON Web Token validation parameters for outbox requests only. 
If not specified, the values of the parameters of the common `jwt` are used | `{}` | | `jwt.existingSecret` | The name of an existing secret containing variables for jwt. If not specified, a secret named `jwt` will be created | `""` | | `extraConf.configMap` | The name of the ConfigMap containing the json file that override the default values | `""` | -| `extraConf.filename` | The name of the json file that contains custom values. Must be the same as the `key` name in `extraConf.ConfigMap` | `production-linux-local.json` | +| `extraConf.filename` | The name of the json file that contains custom values. Must be the same as the `key` name in `extraConf.ConfigMap` | `local-production-linux.json` | | `extraThemes.configMap` | The name of the ConfigMap containing the json file that contains the interface themes | `""` | | `extraThemes.filename` | The name of the json file that contains custom interface themes. Must be the same as the `key` name in `extraThemes.configMap` | `custom-themes.json` | | `sqlScripts.branchName` | The name of the repository branch from which sql scripts will be downloaded | `master` | From 5faf7edf77ca8d7ca91984372b0922cb1b370272 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Thu, 8 Aug 2024 17:50:46 +0300 Subject: [PATCH 21/26] Rename config in values --- values.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/values.yaml b/values.yaml index 3e7d5b6..fd1d095 100644 --- a/values.yaml +++ b/values.yaml @@ -49,11 +49,11 @@ connections: redisPort: "6379" ## connections.redisUser The Redis user name ## ref: https://redis.io/docs/management/security/acl/ - ## The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file + ## The value in this parameter overrides the value set in the `options` object in `local-production-linux.json` if you add custom configuration file redisUser: default 
## connections.redisDBNum Number of the redis logical database to be selected ## ref: https://redis.io/commands/select/ - ## The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file + ## The value in this parameter overrides the value set in the `options` object in `local-production-linux.json` if you add custom configuration file ## Not used if values are set in `connections.redisClusterNodes` redisDBNum: "0" ## connections.redisClusterNodes List of nodes in the Redis cluster @@ -70,14 +70,14 @@ connections: redisSentinelGroupName: mymaster ## connections.redisExistingSecret Name of existing secret to use for Redis passwords ## Must contain the key specified in `connections.redisSecretKeyName` - ## The password from this secret overrides the value for the password set in the `options` object in `production-linux-local.json` if you add custom configuration file + ## The password from this secret overrides the value for the password set in the `options` object in `local-production-linux.json` if you add custom configuration file redisExistingSecret: redis ## connections.redisSecretKeyName The name of the key that contains the Redis user password ## If you set a password in `redisPassword`, a secret will be automatically created, the key name of which will be the value set here redisSecretKeyName: redis-password ## connections.redisPassword The password set for the Redis account ## If set to, it takes priority over the `connections.redisExistingSecret` - ## The value in this parameter overrides the value set in the `options` object in `production-linux-local.json` if you add custom configuration file + ## The value in this parameter overrides the value set in the `options` object in `local-production-linux.json` if you add custom configuration file redisPassword: "" ## connections.redisNoPass Defines whether to use a Redis auth without a password ## If the connection to Redis 
server does not require a password, set the value to `true` From 61a7012cd9d4ce492726ef7088f65a708de91955 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Fri, 9 Aug 2024 15:14:35 +0300 Subject: [PATCH 22/26] Generate ingress configs with `--dry-run=server` The lookup function works correctly only when it can connect to a cluster --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3c59cae..45d66b1 100644 --- a/README.md +++ b/README.md @@ -762,7 +762,7 @@ If you want to deploy ONLYOFFICE Docs in cluster where already exist nginx-ingre > All available Redis connections parameters present [here](#4-parameters) with the `connections.` prefix ```bash -helm template docs onlyoffice/docs-shards --set documentserver.ingressCustomConfigMapsNamespace= --show-only templates/configmaps/balancer-snippet.yaml --show-only templates/configmaps/balancer-lua.yaml --dry-run=client > ./ingressConfigMaps.yaml +helm template docs onlyoffice/docs-shards --set documentserver.ingressCustomConfigMapsNamespace= --show-only templates/configmaps/balancer-snippet.yaml --show-only templates/configmaps/balancer-lua.yaml --dry-run=server > ./ingressConfigMaps.yaml ``` **The second step**, apply configMaps that you create with command below: From 532caf727aa70c73f2cbdfef4af0f15b18d8b0d9 Mon Sep 17 00:00:00 2001 From: VyacheslavSemin Date: Fri, 9 Aug 2024 12:36:25 +0000 Subject: [PATCH 23/26] Fix the default value --- README.md | 4 ++-- values.yaml | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 45d66b1..33921b7 100644 --- a/README.md +++ b/README.md @@ -739,11 +739,11 @@ In this case, more detailed information can be found in the application logs. ### 10. Access to the info page (optional) The access to `/info` page is limited by default.
-In order to allow the access to it, you need to specify the IP addresses or subnets (that will be Proxy container clients in this case) using `proxy.infoAllowedIP` parameter. +In order to allow the access to it, you need to specify the IP addresses or subnets (that will be Proxy container clients in this case) using `documentserver.proxy.infoAllowedIP` parameter. Taking into consideration the specifics of Kubernetes net interaction it is possible to get the original IP of the user (being Proxy client) though it's not a standard scenario. Generally the Pods / Nodes / Load Balancer addresses will actually be the clients, so these addresses are to be used. In this case the access to the info page will be available to everyone. -You can further limit the access to the `info` page using Nginx [Basic Authentication](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) which you can turn on by setting `proxy.infoAllowedUser` parameter value and by setting the password using `proxy.infoAllowedPassword` parameter, alternatively you can use the existing secret with password by setting its name with `proxy.infoAllowedExistingSecret` parameter. +You can further limit the access to the `info` page using Nginx [Basic Authentication](https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html) which you can turn on by setting `documentserver.proxy.infoAllowedUser` parameter value and by setting the password using `documentserver.proxy.infoAllowedPassword` parameter, alternatively you can use the existing secret with password by setting its name with `documentserver.proxy.infoAllowedExistingSecret` parameter. ### 11. 
Deploy ONLYOFFICE Docs with your own dependency (optional) diff --git a/values.yaml b/values.yaml index fd1d095..2c260c5 100644 --- a/values.yaml +++ b/values.yaml @@ -620,10 +620,10 @@ documentserver: secureLinkSecret: verysecretstring ## documentserver.proxy.infoAllowedIP Defines ip addresses for accessing the info page ## Example: - infoAllowedIP: - - 10.244.0.0/16 - - 10.135.0.0/16 - ## infoAllowedIP: [] + ## infoAllowedIP: + ## - 10.244.0.79 + ## - 192.168.1.0/24 + infoAllowedIP: [] ## documentserver.proxy.infoAllowedUser Defines user name for accessing the info page ## If not set to, Nginx Basic Authentication will not be applied to access the info page ## ref: https://nginx.org/en/docs/http/ngx_http_auth_basic_module.html From 0d4af556deb41b4c92c6ac5ab5783e8751120866 Mon Sep 17 00:00:00 2001 From: danilapog Date: Fri, 9 Aug 2024 16:32:01 +0300 Subject: [PATCH 24/26] Deploy configmaps only when controller enabled --- templates/configmaps/balancer-lua.yaml | 2 ++ templates/configmaps/balancer-snippet.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/templates/configmaps/balancer-lua.yaml b/templates/configmaps/balancer-lua.yaml index 344b366..bc2e0b9 100644 --- a/templates/configmaps/balancer-lua.yaml +++ b/templates/configmaps/balancer-lua.yaml @@ -1,3 +1,4 @@ +{{- if index .Values "ingress-nginx" "enabled" }} apiVersion: v1 kind: ConfigMap metadata: @@ -393,3 +394,4 @@ data: }}) return _M +{{- end }} diff --git a/templates/configmaps/balancer-snippet.yaml b/templates/configmaps/balancer-snippet.yaml index 2dfa0e1..1e27b90 100644 --- a/templates/configmaps/balancer-snippet.yaml +++ b/templates/configmaps/balancer-snippet.yaml @@ -1,3 +1,4 @@ +{{- if index .Values "ingress-nginx" "enabled" }} apiVersion: v1 kind: ConfigMap metadata: @@ -228,3 +229,4 @@ data: if ($docs_wopisrc) { proxy_pass http://$custom_endpoint; } +{{- end }} From 0b3612221a6cfa8055bd86fda3ff9aab76a93a95 Mon Sep 17 00:00:00 2001 From: Danil Titarenko 
<77471369+danilapog@users.noreply.github.com> Date: Fri, 9 Aug 2024 16:39:27 +0300 Subject: [PATCH 25/26] Ignore some paths --- .github/workflows/stable_repo.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/stable_repo.yaml b/.github/workflows/stable_repo.yaml index 4bc5305..d68e623 100644 --- a/.github/workflows/stable_repo.yaml +++ b/.github/workflows/stable_repo.yaml @@ -5,6 +5,9 @@ on: push: branches: - master + paths-ignore: + - '**/README.md' + - '.github/**' jobs: build: From f26d3fb81189c56e0c8e1b262d29e788c98d8f77 Mon Sep 17 00:00:00 2001 From: Danil Titarenko <77471369+danilapog@users.noreply.github.com> Date: Tue, 13 Aug 2024 18:18:50 +0300 Subject: [PATCH 26/26] Add information about nginx-ingress subchart license (#3) * Add a link to the nginx-ingress subchart license * revert 7fe44ced * Use a file to provide information about the third-party software * Capitalize developer's name * Add apache 2.0 license text Also add small info header into modified config * Move main AGPL license to path --- LICENSES/LICENSE-APACHE2.0 | 193 +++++++++++++++++++++++++ NOTICE | 15 ++ templates/configmaps/balancer-lua.yaml | 5 + 3 files changed, 213 insertions(+) create mode 100644 LICENSES/LICENSE-APACHE2.0 create mode 100644 NOTICE diff --git a/LICENSES/LICENSE-APACHE2.0 b/LICENSES/LICENSE-APACHE2.0 new file mode 100644 index 0000000..657d417 --- /dev/null +++ b/LICENSES/LICENSE-APACHE2.0 @@ -0,0 +1,193 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..fb305f2 --- /dev/null +++ b/NOTICE @@ -0,0 +1,15 @@ +This product includes software developed by Kubernetes. +Modifications made by ONLYOFFICE. + +Included software license: https://github.com/kubernetes/ingress-nginx/blob/main/LICENSE +Original file: https://github.com/kubernetes/ingress-nginx/blob/main/rootfs/etc/nginx/lua/balancer.lua + +Modifications: +- The _M.balance function has been redesigned and the ability to return a peer if there is a match with the service name has been added. +- Path to the modified file: **/templates/configmaps/balancer-lua.yaml + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 diff --git a/templates/configmaps/balancer-lua.yaml b/templates/configmaps/balancer-lua.yaml index bc2e0b9..d568353 100644 --- a/templates/configmaps/balancer-lua.yaml +++ b/templates/configmaps/balancer-lua.yaml @@ -1,3 +1,8 @@ +# All content from balancer.lua config +# was taken from the main Kubernetes repository and modified by ONLYOFFICE +# for more information please check NOTICE by link below +# https://github.com/ONLYOFFICE/Kubernetes-Docs-Shards/blob/master/NOTICE + {{- if index .Values "ingress-nginx" "enabled" }} apiVersion: v1 kind: ConfigMap