From 096587978f20c53d998dcdf39c921fb69478df69 Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Tue, 29 Jun 2021 11:30:48 -0700 Subject: [PATCH] Modify charts --- .../{6.6.402+up6.6.4 => 6.11.0}/.helmignore | 0 .../{6.6.402+up6.6.4 => 6.11.0}/Chart.yaml | 4 +- .../{6.6.402+up6.6.4 => 6.11.0}/README.md | 16 +- .../dashboards/custom-dashboard.json | 0 .../templates/NOTES.txt | 0 .../6.11.0}/templates/_helpers.tpl | 13 + .../6.11.0}/templates/_pod.tpl | 25 +- .../templates/clusterrole.yaml | 0 .../templates/clusterrolebinding.yaml | 0 .../configmap-dashboard-provider.yaml | 0 .../templates/configmap.yaml | 4 +- .../templates/dashboards-json-configmap.yaml | 0 .../6.11.0}/templates/deployment.yaml | 4 +- .../templates/headless-service.yaml | 0 .../rancher-grafana/6.11.0/templates/hpa.yaml | 20 + .../templates/image-renderer-deployment.yaml | 0 .../image-renderer-network-policy.yaml | 0 .../templates/image-renderer-service.yaml | 2 + .../templates/ingress.yaml | 0 .../templates/nginx-config.yaml | 0 .../templates/poddisruptionbudget.yaml | 0 .../6.11.0}/templates/podsecuritypolicy.yaml | 29 +- .../templates/pvc.yaml | 0 .../templates/role.yaml | 0 .../templates/rolebinding.yaml | 0 .../templates/secret-env.yaml | 0 .../6.11.0}/templates/secret.yaml | 8 +- .../templates/service.yaml | 3 +- .../templates/serviceaccount.yaml | 0 .../templates/servicemonitor.yaml | 0 .../templates/statefulset.yaml | 2 +- .../templates/tests/test-configmap.yaml | 0 .../tests/test-podsecuritypolicy.yaml | 0 .../templates/tests/test-role.yaml | 0 .../templates/tests/test-rolebinding.yaml | 0 .../templates/tests/test-serviceaccount.yaml | 0 .../templates/tests/test.yaml | 0 .../{6.6.402+up6.6.4 => 6.11.0}/values.yaml | 47 +- .../2.13.101+up2.13.1/LICENSE | 202 -- .../{2.13.101+up2.13.1 => 3.2.0}/.helmignore | 0 .../{2.13.101+up2.13.1 => 3.2.0}/Chart.yaml | 7 +- .../{2.13.101+up2.13.1 => 3.2.0}/OWNERS | 0 .../3.2.0}/README.md | 34 +- .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 
.../templates/clusterrolebinding.yaml | 0 .../3.2.0}/templates/deployment.yaml | 67 +- .../templates/kubeconfig-secret.yaml | 0 .../templates/pdb.yaml | 0 .../templates/podsecuritypolicy.yaml | 0 .../templates/psp-clusterrole.yaml | 0 .../templates/psp-clusterrolebinding.yaml | 0 .../templates/role.yaml | 12 +- .../3.2.0}/templates/rolebinding.yaml | 2 +- .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../templates/servicemonitor.yaml | 0 .../templates/stsdiscovery-role.yaml | 0 .../templates/stsdiscovery-rolebinding.yaml | 0 .../{2.13.101+up2.13.1 => 3.2.0}/values.yaml | 15 +- .../{14.5.101+up14.5.0 => 16.6.0}/Chart.yaml | 2 +- .../{14.5.101+up14.5.0 => 16.6.0}/README.md | 0 .../crd-manifest/crd-alertmanagerconfigs.yaml | 2 +- .../crd-manifest/crd-alertmanagers.yaml | 2 +- .../crd-manifest/crd-podmonitors.yaml | 4 +- .../crd-manifest/crd-probes.yaml | 144 +- .../crd-manifest/crd-prometheuses.yaml | 21 +- .../crd-manifest/crd-prometheusrules.yaml | 2 +- .../crd-manifest/crd-servicemonitors.yaml | 4 +- .../crd-manifest/crd-thanosrulers.yaml | 4 +- .../templates/_helpers.tpl | 21 + .../templates/jobs.yaml | 38 +- .../templates/manifest.yaml | 0 .../templates/rbac.yaml | 2 +- .../{14.5.101+up14.5.0 => 16.6.0}/values.yaml | 4 +- .../charts/kube-state-metrics/LICENSE | 202 -- .../charts/kubeAdmScheduler/README.md | 54 - .../kubeAdmScheduler/templates/_helpers.tpl | 87 - .../templates/pushprox-clients-rbac.yaml | 74 - .../templates/pushprox-clients.yaml | 135 - .../templates/pushprox-proxy-rbac.yaml | 63 - .../templates/pushprox-servicemonitor.yaml | 39 - .../charts/kubeAdmScheduler/values.yaml | 86 - .../charts/rke2ControllerManager/README.md | 54 - .../templates/_helpers.tpl | 87 - .../templates/pushprox-clients-rbac.yaml | 74 - .../templates/pushprox-clients.yaml | 135 - .../templates/pushprox-proxy-rbac.yaml | 63 - .../templates/pushprox-servicemonitor.yaml | 39 - .../charts/rke2ControllerManager/values.yaml | 86 - .../charts/rke2Etcd/README.md 
| 54 - .../charts/rke2Etcd/templates/_helpers.tpl | 87 - .../templates/pushprox-clients-rbac.yaml | 74 - .../rke2Etcd/templates/pushprox-clients.yaml | 135 - .../templates/pushprox-proxy-rbac.yaml | 63 - .../templates/pushprox-servicemonitor.yaml | 39 - .../charts/rke2Etcd/values.yaml | 86 - .../charts/rke2Proxy/README.md | 54 - .../charts/rke2Proxy/templates/_helpers.tpl | 87 - .../templates/pushprox-clients-rbac.yaml | 74 - .../rke2Proxy/templates/pushprox-clients.yaml | 135 - .../templates/pushprox-proxy-rbac.yaml | 63 - .../templates/pushprox-servicemonitor.yaml | 39 - .../charts/rke2Proxy/values.yaml | 86 - .../charts/rke2Scheduler/README.md | 54 - .../rke2Scheduler/templates/_helpers.tpl | 87 - .../templates/pushprox-clients-rbac.yaml | 74 - .../templates/pushprox-clients.yaml | 135 - .../templates/pushprox-proxy-rbac.yaml | 63 - .../templates/pushprox-servicemonitor.yaml | 39 - .../charts/rke2Scheduler/values.yaml | 86 - .../charts/rkeControllerManager/README.md | 54 - .../templates/_helpers.tpl | 87 - .../templates/pushprox-clients-rbac.yaml | 74 - .../templates/pushprox-clients.yaml | 135 - .../templates/pushprox-proxy-rbac.yaml | 63 - .../templates/pushprox-servicemonitor.yaml | 39 - .../charts/rkeControllerManager/values.yaml | 86 - .../charts/rkeEtcd/README.md | 54 - .../charts/rkeEtcd/templates/_helpers.tpl | 87 - .../templates/pushprox-clients-rbac.yaml | 74 - .../rkeEtcd/templates/pushprox-clients.yaml | 135 - .../templates/pushprox-proxy-rbac.yaml | 63 - .../templates/pushprox-servicemonitor.yaml | 39 - .../charts/rkeEtcd/values.yaml | 86 - .../charts/rkeProxy/README.md | 54 - .../charts/rkeProxy/templates/_helpers.tpl | 87 - .../templates/pushprox-clients-rbac.yaml | 74 - .../rkeProxy/templates/pushprox-clients.yaml | 135 - .../templates/pushprox-proxy-rbac.yaml | 63 - .../templates/pushprox-servicemonitor.yaml | 39 - .../charts/rkeProxy/values.yaml | 86 - .../charts/rkeScheduler/README.md | 54 - .../rkeScheduler/templates/_helpers.tpl | 87 - 
.../templates/pushprox-clients-rbac.yaml | 74 - .../templates/pushprox-clients.yaml | 135 - .../templates/pushprox-proxy-rbac.yaml | 63 - .../templates/pushprox-servicemonitor.yaml | 39 - .../charts/rkeScheduler/values.yaml | 86 - .../windowsExporter/templates/_helpers.tpl | 73 - .../grafana/dashboards-1.14/etcd.yaml | 1118 ------- .../dashboards-1.14/k8s-resources-pod.yaml | 1772 ----------- .../dashboards/k8s-cluster-rsrc-use.yaml | 959 ------ .../grafana/dashboards/k8s-node-rsrc-use.yaml | 986 ------ .../dashboards/k8s-resources-cluster.yaml | 1479 --------- .../dashboards/k8s-resources-namespace.yaml | 963 ------ .../grafana/dashboards/k8s-resources-pod.yaml | 1006 ------ .../dashboards/k8s-resources-workload.yaml | 936 ------ .../k8s-resources-workloads-namespace.yaml | 972 ------ .../templates/grafana/dashboards/nodes.yaml | 1383 --------- .../dashboards/persistentvolumesusage.yaml | 573 ---- .../templates/grafana/dashboards/pods.yaml | 680 ---- .../grafana/dashboards/statefulset.yaml | 926 ------ .../prometheus/rules/alertmanager.rules.yaml | 63 - .../templates/prometheus/rules/etcd.yaml | 181 -- .../prometheus/rules/general.rules.yaml | 56 - .../templates/prometheus/rules/k8s.rules.yaml | 83 - .../rules/kube-apiserver.rules.yaml | 39 - .../kube-prometheus-node-alerting.rules.yaml | 47 - .../kube-prometheus-node-recording.rules.yaml | 41 - .../rules/kube-scheduler.rules.yaml | 65 - .../prometheus/rules/kubernetes-absent.yaml | 159 - .../prometheus/rules/kubernetes-apps.yaml | 200 -- .../rules/kubernetes-resources.yaml | 121 - .../prometheus/rules/kubernetes-storage.yaml | 72 - .../prometheus/rules/kubernetes-system.yaml | 184 -- .../prometheus/rules/node-network.yaml | 57 - .../templates/prometheus/rules/node-time.yaml | 37 - .../prometheus/rules/node.rules.yaml | 202 -- .../prometheus/rules/prometheus-operator.yaml | 49 - .../prometheus/rules/prometheus.rules.yaml | 139 - .../{14.5.101+up14.5.0 => 16.6.0}/.helmignore | 0 .../CHANGELOG.md | 0 
.../CONTRIBUTING.md | 0 .../{14.5.101+up14.5.0 => 16.6.0}/Chart.yaml | 16 +- .../{14.5.101+up14.5.0 => 16.6.0}/README.md | 28 +- .../app-README.md | 0 .../charts/grafana/.helmignore | 0 .../charts/grafana/Chart.yaml | 4 +- .../charts/grafana/README.md | 16 +- .../grafana/dashboards/custom-dashboard.json | 0 .../charts/grafana/templates/NOTES.txt | 0 .../charts/grafana}/templates/_helpers.tpl | 13 + .../16.6.0/charts/grafana}/templates/_pod.tpl | 25 +- .../charts/grafana/templates/clusterrole.yaml | 0 .../grafana/templates/clusterrolebinding.yaml | 0 .../configmap-dashboard-provider.yaml | 0 .../charts/grafana/templates/configmap.yaml | 4 +- .../templates/dashboards-json-configmap.yaml | 0 .../charts/grafana}/templates/deployment.yaml | 4 +- .../grafana/templates/headless-service.yaml | 0 .../16.6.0/charts/grafana/templates/hpa.yaml | 20 + .../templates/image-renderer-deployment.yaml | 0 .../image-renderer-network-policy.yaml | 0 .../templates/image-renderer-service.yaml | 2 + .../charts/grafana/templates/ingress.yaml | 0 .../grafana/templates/nginx-config.yaml | 0 .../templates/poddisruptionbudget.yaml | 0 .../grafana}/templates/podsecuritypolicy.yaml | 29 +- .../charts/grafana/templates/pvc.yaml | 0 .../charts/grafana/templates/role.yaml | 0 .../charts/grafana/templates/rolebinding.yaml | 0 .../charts/grafana/templates/secret-env.yaml | 0 .../charts/grafana}/templates/secret.yaml | 8 +- .../charts/grafana/templates/service.yaml | 3 +- .../grafana/templates/serviceaccount.yaml | 0 .../grafana/templates/servicemonitor.yaml | 0 .../charts/grafana/templates/statefulset.yaml | 2 +- .../templates/tests/test-configmap.yaml | 0 .../tests/test-podsecuritypolicy.yaml | 0 .../grafana/templates/tests/test-role.yaml | 0 .../templates/tests/test-rolebinding.yaml | 0 .../templates/tests/test-serviceaccount.yaml | 0 .../charts/grafana/templates/tests/test.yaml | 0 .../charts/grafana/values.yaml | 47 +- .../charts/hardenedKubelet}/.helmignore | 0 
.../16.6.0/charts/hardenedKubelet/Chart.yaml | 13 + .../charts/hardenedKubelet}/README.md | 6 + .../hardenedKubelet}/templates/_helpers.tpl | 19 +- .../templates/pushprox-clients-rbac.yaml | 9 +- .../templates/pushprox-clients.yaml | 10 + .../templates/pushprox-proxy-rbac.yaml | 6 +- .../templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 8 +- .../charts/hardenedKubelet}/values.yaml | 31 +- .../charts/hardenedNodeExporter}/.helmignore | 0 .../charts/hardenedNodeExporter/Chart.yaml | 13 + .../charts/hardenedNodeExporter}/README.md | 6 + .../templates/_helpers.tpl | 19 +- .../templates/pushprox-clients-rbac.yaml | 9 +- .../templates/pushprox-clients.yaml | 10 + .../templates/pushprox-proxy-rbac.yaml | 6 +- .../templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 8 +- .../charts/hardenedNodeExporter}/values.yaml | 31 +- .../charts/k3sServer}/.helmignore | 0 .../charts/k3sServer/Chart.yaml | 2 +- .../charts/k3sServer}/README.md | 6 + .../charts/k3sServer}/templates/_helpers.tpl | 19 +- .../templates/pushprox-clients-rbac.yaml | 9 +- .../k3sServer/templates/pushprox-clients.yaml | 10 + .../templates/pushprox-proxy-rbac.yaml | 6 +- .../k3sServer}/templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 8 +- .../charts/k3sServer/values.yaml | 31 +- .../charts/kube-state-metrics/.helmignore | 0 .../charts/kube-state-metrics/Chart.yaml | 7 +- .../charts/kube-state-metrics}/README.md | 34 +- .../kube-state-metrics/templates/NOTES.txt | 0 .../kube-state-metrics/templates/_helpers.tpl | 0 .../templates/clusterrolebinding.yaml | 0 .../templates/deployment.yaml | 67 +- .../templates/kubeconfig-secret.yaml | 0 .../kube-state-metrics/templates/pdb.yaml | 0 .../templates/podsecuritypolicy.yaml | 0 .../templates/psp-clusterrole.yaml | 0 .../templates/psp-clusterrolebinding.yaml | 0 .../kube-state-metrics/templates/role.yaml | 12 +- .../templates/rolebinding.yaml | 2 +- 
.../kube-state-metrics/templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../templates/servicemonitor.yaml | 0 .../templates/stsdiscovery-role.yaml | 0 .../templates/stsdiscovery-rolebinding.yaml | 0 .../charts/kube-state-metrics/values.yaml | 15 +- .../kubeAdmControllerManager}/.helmignore | 0 .../kubeAdmControllerManager/Chart.yaml | 2 +- .../kubeAdmControllerManager}/README.md | 6 + .../templates/_helpers.tpl | 19 +- .../templates/pushprox-clients-rbac.yaml | 9 +- .../templates/pushprox-clients.yaml | 10 + .../templates/pushprox-proxy-rbac.yaml | 6 +- .../templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 8 +- .../kubeAdmControllerManager}/values.yaml | 31 +- .../charts/kubeAdmEtcd}/.helmignore | 0 .../charts/kubeAdmEtcd/Chart.yaml | 2 +- .../16.6.0/charts/kubeAdmEtcd/README.md | 60 + .../charts/kubeAdmEtcd/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 33 + .../16.6.0/charts/kubeAdmEtcd/values.yaml | 111 + .../charts/kubeAdmProxy}/.helmignore | 0 .../charts/kubeAdmProxy/Chart.yaml | 2 +- .../16.6.0/charts/kubeAdmProxy/README.md | 60 + .../kubeAdmProxy/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 33 + .../16.6.0/charts/kubeAdmProxy/values.yaml | 111 + .../charts/kubeAdmScheduler}/.helmignore | 0 .../charts/kubeAdmScheduler/Chart.yaml | 2 +- .../16.6.0/charts/kubeAdmScheduler/README.md | 60 + .../kubeAdmScheduler/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 0 
.../templates/pushprox-servicemonitor.yaml | 33 + .../charts/kubeAdmScheduler/values.yaml | 111 + .../charts/prometheus-adapter/.helmignore | 0 .../charts/prometheus-adapter/Chart.yaml | 4 +- .../charts/prometheus-adapter}/README.md | 4 +- .../prometheus-adapter/templates/NOTES.txt | 0 .../prometheus-adapter/templates/_helpers.tpl | 0 .../templates/certmanager.yaml | 0 .../cluster-role-binding-auth-delegator.yaml | 0 .../cluster-role-binding-resource-reader.yaml | 0 .../cluster-role-resource-reader.yaml | 0 .../templates/configmap.yaml | 0 .../templates/custom-metrics-apiservice.yaml | 0 ...stom-metrics-cluster-role-binding-hpa.yaml | 0 .../custom-metrics-cluster-role.yaml | 0 .../templates/deployment.yaml | 0 .../external-metrics-apiservice.yaml | 0 ...rnal-metrics-cluster-role-binding-hpa.yaml | 0 .../external-metrics-cluster-role.yaml | 0 .../prometheus-adapter/templates/pdb.yaml | 0 .../prometheus-adapter/templates/psp.yaml | 3 + .../resource-metrics-apiservice.yaml | 0 ...resource-metrics-cluster-role-binding.yaml | 0 .../resource-metrics-cluster-role.yaml | 0 .../templates/role-binding-auth-reader.yaml | 0 .../prometheus-adapter/templates/secret.yaml | 0 .../prometheus-adapter/templates/service.yaml | 0 .../templates/serviceaccount.yaml | 4 + .../charts/prometheus-adapter}/values.yaml | 11 +- .../prometheus-node-exporter/.helmignore | 0 .../prometheus-node-exporter/Chart.yaml | 2 +- .../charts/prometheus-node-exporter/README.md | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 .../templates/daemonset.yaml | 4 + .../templates/endpoints.yaml | 0 .../templates/monitor.yaml | 0 .../templates/psp-clusterrole.yaml | 0 .../templates/psp-clusterrolebinding.yaml | 0 .../templates/psp.yaml | 4 + .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../prometheus-node-exporter}/values.yaml | 5 + .../charts/rke2ControllerManager}/.helmignore | 0 .../charts/rke2ControllerManager/Chart.yaml | 2 +- .../charts/rke2ControllerManager/README.md | 60 
+ .../templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 33 + .../charts/rke2ControllerManager/values.yaml | 111 + .../charts/rke2Etcd}/.helmignore | 0 .../charts/rke2Etcd/Chart.yaml | 2 +- .../16.6.0/charts/rke2Etcd/README.md | 60 + .../charts/rke2Etcd/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../rke2Etcd/templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../rke2Etcd}/templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 33 + .../16.6.0/charts/rke2Etcd/values.yaml | 111 + .../charts/rke2IngressNginx}/.helmignore | 0 .../16.6.0/charts/rke2IngressNginx/Chart.yaml | 13 + .../16.6.0/charts/rke2IngressNginx/README.md | 60 + .../rke2IngressNginx/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 33 + .../charts/rke2IngressNginx/values.yaml | 111 + .../charts/rke2Proxy}/.helmignore | 0 .../charts/rke2Proxy/Chart.yaml | 2 +- .../16.6.0/charts/rke2Proxy/README.md | 60 + .../charts/rke2Proxy/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../rke2Proxy/templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../rke2Proxy}/templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 33 + .../16.6.0/charts/rke2Proxy/values.yaml | 111 + .../charts/rke2Scheduler}/.helmignore | 0 .../charts/rke2Scheduler/Chart.yaml | 2 +- .../16.6.0/charts/rke2Scheduler/README.md | 60 + .../rke2Scheduler/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../templates/pushprox-clients.yaml | 145 + 
.../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 33 + .../16.6.0/charts/rke2Scheduler/values.yaml | 111 + .../charts/rkeControllerManager}/.helmignore | 0 .../charts/rkeControllerManager/Chart.yaml | 2 +- .../charts/rkeControllerManager/README.md | 60 + .../templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 0 .../templates/pushprox-servicemonitor.yaml | 33 + .../charts/rkeControllerManager/values.yaml | 111 + .../charts/rkeEtcd}/.helmignore | 0 .../charts/rkeEtcd/Chart.yaml | 2 +- .../16.6.0/charts/rkeEtcd/README.md | 60 + .../charts/rkeEtcd/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../rkeEtcd/templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../rkeEtcd/templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 33 + .../16.6.0/charts/rkeEtcd/values.yaml | 111 + .../charts/rkeIngressNginx}/.helmignore | 0 .../16.6.0/charts/rkeIngressNginx/Chart.yaml | 13 + .../16.6.0/charts/rkeIngressNginx/README.md | 60 + .../rkeIngressNginx/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 33 + .../16.6.0/charts/rkeIngressNginx/values.yaml | 111 + .../16.6.0/charts/rkeProxy/.helmignore | 23 + .../charts/rkeProxy/Chart.yaml | 2 +- .../16.6.0/charts/rkeProxy/README.md | 60 + .../charts/rkeProxy/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../rkeProxy/templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../rkeProxy/templates/pushprox-proxy.yaml | 52 + 
.../templates/pushprox-servicemonitor.yaml | 33 + .../16.6.0/charts/rkeProxy/values.yaml | 111 + .../16.6.0/charts/rkeScheduler/.helmignore | 23 + .../charts/rkeScheduler/Chart.yaml | 2 +- .../16.6.0/charts/rkeScheduler/README.md | 60 + .../rkeScheduler/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 33 + .../16.6.0/charts/rkeScheduler/values.yaml | 111 + .../16.6.0/charts/windowsExporter/.helmignore | 23 + .../charts/windowsExporter/Chart.yaml | 4 +- .../charts/windowsExporter/README.md | 0 .../scripts/check-wins-version.ps1 | 0 .../windowsExporter/scripts/proxy-entry.ps1 | 0 .../charts/windowsExporter/scripts/run.ps1 | 0 .../windowsExporter/templates/_helpers.tpl | 113 + .../windowsExporter/templates/configmap.yaml | 0 .../windowsExporter/templates/daemonset.yaml | 0 .../templates/prometheusrule.yaml | 13 + .../windowsExporter/templates/rbac.yaml | 0 .../windowsExporter/templates/service.yaml | 0 .../templates/servicemonitor.yaml | 5 +- .../charts/windowsExporter}/values.yaml | 6 +- .../files/ingress-nginx/nginx.json | 4 +- .../request-handling-performance.json | 8 +- .../cluster/rancher-cluster-nodes.json | 10 +- .../rancher/cluster/rancher-cluster.json | 10 +- .../rancher/home/rancher-default-home.json | 4 +- .../files/rancher/k8s/rancher-etcd-nodes.json | 0 .../files/rancher/k8s/rancher-etcd.json | 0 .../k8s/rancher-k8s-components-nodes.json | 0 .../rancher/k8s/rancher-k8s-components.json | 0 .../rancher/nodes/rancher-node-detail.json | 10 +- .../files/rancher/nodes/rancher-node.json | 10 +- .../rancher/pods/rancher-pod-containers.json | 0 .../files/rancher/pods/rancher-pod.json | 0 .../workloads/rancher-workload-pods.json | 0 .../rancher/workloads/rancher-workload.json | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 53 +- 
.../templates/alertmanager/alertmanager.yaml | 6 +- .../templates/alertmanager/cleanupSecret.yaml | 0 .../templates/alertmanager/ingress.yaml | 30 +- .../alertmanager/ingressperreplica.yaml | 21 +- .../alertmanager/podDisruptionBudget.yaml | 0 .../templates/alertmanager/psp-role.yaml | 0 .../alertmanager/psp-rolebinding.yaml | 0 .../templates/alertmanager/psp.yaml | 0 .../templates/alertmanager/secret.yaml | 0 .../templates/alertmanager/service.yaml | 0 .../alertmanager/serviceaccount.yaml | 4 + .../alertmanager/servicemonitor.yaml | 3 + .../alertmanager/serviceperreplica.yaml | 0 .../templates/exporters/core-dns/service.yaml | 0 .../exporters/core-dns/servicemonitor.yaml | 3 + .../kube-api-server/servicemonitor.yaml | 7 +- .../kube-controller-manager/endpoints.yaml | 0 .../kube-controller-manager/service.yaml | 0 .../servicemonitor.yaml | 3 + .../templates/exporters/kube-dns/service.yaml | 0 .../exporters/kube-dns/servicemonitor.yaml | 3 + .../exporters/kube-etcd/endpoints.yaml | 0 .../exporters/kube-etcd/service.yaml | 0 .../exporters/kube-etcd/servicemonitor.yaml | 3 + .../exporters/kube-proxy/endpoints.yaml | 0 .../exporters/kube-proxy/service.yaml | 0 .../exporters/kube-proxy/servicemonitor.yaml | 3 + .../exporters/kube-scheduler/endpoints.yaml | 0 .../exporters/kube-scheduler/service.yaml | 0 .../kube-scheduler/servicemonitor.yaml | 3 + .../kube-state-metrics/serviceMonitor.yaml | 8 + .../exporters/kubelet/servicemonitor.yaml | 27 +- .../node-exporter/servicemonitor.yaml | 13 +- .../grafana/configmap-dashboards.yaml | 4 +- .../grafana/configmaps-datasources.yaml | 6 +- .../grafana/dashboards-1.14/apiserver.yaml | 2 +- .../dashboards-1.14/cluster-total.yaml | 8 +- .../dashboards-1.14/controller-manager.yaml | 52 +- .../grafana/dashboards-1.14}/etcd.yaml | 6 +- .../grafana/dashboards-1.14/k8s-coredns.yaml | 4 +- .../k8s-resources-cluster.yaml | 582 +++- .../k8s-resources-namespace.yaml | 2744 +++++++++++++++++ .../dashboards-1.14/k8s-resources-node.yaml | 22 +- 
.../dashboards-1.14/k8s-resources-pod.yaml} | 1401 +++++---- .../k8s-resources-workload.yaml | 108 +- .../k8s-resources-workloads-namespace.yaml | 152 +- .../grafana/dashboards-1.14/kubelet.yaml | 66 +- .../dashboards-1.14/namespace-by-pod.yaml | 10 +- .../namespace-by-workload.yaml | 12 +- .../node-cluster-rsrc-use.yaml | 36 +- .../dashboards-1.14/node-rsrc-use.yaml | 36 +- .../grafana/dashboards-1.14/nodes.yaml | 20 +- .../persistentvolumesusage.yaml | 18 +- .../grafana/dashboards-1.14/pod-total.yaml | 12 +- .../prometheus-remote-write.yaml | 2 +- .../grafana/dashboards-1.14/prometheus.yaml | 26 +- .../grafana/dashboards-1.14/proxy.yaml | 62 +- .../grafana/dashboards-1.14/scheduler.yaml | 70 +- .../grafana/dashboards-1.14/statefulset.yaml | 8 +- .../dashboards-1.14/workload-total.yaml | 14 +- .../templates/grafana/namespaces.yaml | 0 .../templates/grafana/servicemonitor.yaml | 0 .../job-patch/clusterrole.yaml | 0 .../job-patch/clusterrolebinding.yaml | 0 .../job-patch/job-createSecret.yaml | 8 +- .../job-patch/job-patchWebhook.yaml | 8 +- .../admission-webhooks/job-patch/psp.yaml | 0 .../admission-webhooks/job-patch/role.yaml | 0 .../job-patch/rolebinding.yaml | 0 .../job-patch/serviceaccount.yaml | 2 + .../mutatingWebhookConfiguration.yaml | 4 +- .../validatingWebhookConfiguration.yaml | 4 +- .../prometheus-operator/certmanager.yaml | 4 +- .../prometheus-operator/clusterrole.yaml | 0 .../clusterrolebinding.yaml | 0 .../prometheus-operator/deployment.yaml | 0 .../prometheus-operator/psp-clusterrole.yaml | 0 .../psp-clusterrolebinding.yaml | 0 .../templates/prometheus-operator/psp.yaml | 0 .../prometheus-operator/service.yaml | 0 .../prometheus-operator/serviceaccount.yaml | 4 + .../prometheus-operator/servicemonitor.yaml | 0 .../templates/prometheus/_rules.tpl | 2 +- .../additionalAlertRelabelConfigs.yaml | 0 .../additionalAlertmanagerConfigs.yaml | 0 .../prometheus/additionalPrometheusRules.yaml | 3 + .../prometheus/additionalScrapeConfigs.yaml | 0 
.../templates/prometheus/clusterrole.yaml | 0 .../prometheus/clusterrolebinding.yaml | 0 .../templates/prometheus/ingress.yaml | 42 +- .../prometheus/ingressThanosSidecar.yaml | 30 +- .../prometheus/ingressperreplica.yaml | 21 +- .../templates/prometheus/nginx-config.yaml | 0 .../prometheus/podDisruptionBudget.yaml | 2 +- .../templates/prometheus/podmonitors.yaml | 0 .../templates/prometheus/prometheus.yaml | 31 +- .../templates/prometheus/psp-clusterrole.yaml | 0 .../prometheus/psp-clusterrolebinding.yaml | 0 .../templates/prometheus/psp.yaml | 0 .../rules-1.14/alertmanager.rules.yaml | 0 .../templates/prometheus/rules-1.14/etcd.yaml | 4 +- .../prometheus/rules-1.14/general.rules.yaml | 0 .../prometheus/rules-1.14/k8s.rules.yaml | 36 +- .../kube-apiserver-availability.rules.yaml | 0 .../rules-1.14/kube-apiserver-slos.yaml | 0 .../rules-1.14/kube-apiserver.rules.yaml | 0 .../kube-prometheus-general.rules.yaml | 0 .../kube-prometheus-node-recording.rules.yaml | 0 .../rules-1.14/kube-scheduler.rules.yaml | 0 .../rules-1.14/kube-state-metrics.yaml | 0 .../prometheus/rules-1.14/kubelet.rules.yaml | 10 +- .../rules-1.14/kubernetes-apps.yaml | 6 +- .../rules-1.14/kubernetes-resources.yaml | 18 +- .../rules-1.14/kubernetes-storage.yaml | 10 +- .../kubernetes-system-apiserver.yaml | 2 +- .../kubernetes-system-controller-manager.yaml | 0 .../rules-1.14/kubernetes-system-kubelet.yaml | 8 +- .../kubernetes-system-scheduler.yaml | 0 .../rules-1.14/kubernetes-system.yaml | 2 +- .../rules-1.14/node-exporter.rules.yaml | 0 .../prometheus/rules-1.14/node-exporter.yaml | 0 .../prometheus/rules-1.14/node-network.yaml | 0 .../prometheus/rules-1.14/node.rules.yaml | 2 +- .../rules-1.14/prometheus-operator.yaml | 0 .../prometheus/rules-1.14/prometheus.yaml | 0 .../templates/prometheus/service.yaml | 2 +- .../prometheus/serviceThanosSidecar.yaml | 2 +- .../serviceThanosSidecarExternal.yaml | 28 + .../templates/prometheus/serviceaccount.yaml | 4 + 
.../templates/prometheus/servicemonitor.yaml | 0 .../templates/prometheus/servicemonitors.yaml | 0 .../prometheus/serviceperreplica.yaml | 2 +- .../rancher-monitoring/clusterrole.yaml | 0 .../rancher-monitoring/config-role.yaml | 0 .../rancher-monitoring/dashboard-role.yaml | 0 .../addons/ingress-nginx-dashboard.yaml | 6 +- .../rancher/cluster-dashboards.yaml | 0 .../dashboards/rancher/default-dashboard.yaml | 0 .../dashboards/rancher/k8s-dashboards.yaml | 0 .../dashboards/rancher/nodes-dashboards.yaml | 0 .../dashboards/rancher/pods-dashboards.yaml | 0 .../rancher/workload-dashboards.yaml | 0 .../exporters/ingress-nginx/service.yaml | 5 +- .../ingress-nginx/servicemonitor.yaml | 8 +- .../rancher-monitoring/hardened.yaml | 0 .../templates/validate-install-crd.yaml | 0 .../{14.5.101+up14.5.0 => 16.6.0}/values.yaml | 306 +- .../.helmignore | 0 .../Chart.yaml | 2 +- .../OWNERS | 0 .../README.md | 0 .../ci/port-values.yaml | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 .../templates/daemonset.yaml | 4 + .../templates/endpoints.yaml | 0 .../templates/monitor.yaml | 0 .../templates/psp-clusterrole.yaml | 0 .../templates/psp-clusterrolebinding.yaml | 0 .../templates/psp.yaml | 4 + .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 0 .../1.18.100+up1.18.1}/values.yaml | 5 + .../{2.12.101+up2.12.1 => 2.14.0}/.helmignore | 0 .../{2.12.101+up2.12.1 => 2.14.0}/Chart.yaml | 4 +- .../2.14.0}/README.md | 4 +- .../ci/default-values.yaml | 0 .../ci/external-rules-values.yaml | 0 .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 0 .../templates/certmanager.yaml | 0 .../cluster-role-binding-auth-delegator.yaml | 0 .../cluster-role-binding-resource-reader.yaml | 0 .../cluster-role-resource-reader.yaml | 0 .../templates/configmap.yaml | 0 .../templates/custom-metrics-apiservice.yaml | 0 ...stom-metrics-cluster-role-binding-hpa.yaml | 0 .../custom-metrics-cluster-role.yaml | 0 .../templates/deployment.yaml | 0 .../external-metrics-apiservice.yaml | 
0 ...rnal-metrics-cluster-role-binding-hpa.yaml | 0 .../external-metrics-cluster-role.yaml | 0 .../templates/pdb.yaml | 0 .../templates/psp.yaml | 3 + .../resource-metrics-apiservice.yaml | 0 ...resource-metrics-cluster-role-binding.yaml | 0 .../resource-metrics-cluster-role.yaml | 0 .../templates/role-binding-auth-reader.yaml | 0 .../templates/secret.yaml | 0 .../templates/service.yaml | 0 .../templates/serviceaccount.yaml | 4 + .../2.14.0}/values.yaml | 11 +- .../rancher-pushprox/0.1.400/.helmignore | 23 + .../rancher-pushprox/0.1.400/Chart.yaml | 13 + .../rancher-pushprox/0.1.400/README.md | 60 + .../0.1.400/templates/_helpers.tpl | 104 + .../templates/pushprox-clients-rbac.yaml | 77 + .../0.1.400/templates/pushprox-clients.yaml | 145 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../0.1.400/templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 33 + .../rancher-pushprox/0.1.400/values.yaml | 111 + .../0.1.0/templates/_helpers.tpl | 73 - .../0.1.100/.helmignore | 23 + .../{0.1.0 => 0.1.100}/Chart.yaml | 4 +- .../{0.1.0 => 0.1.100}/README.md | 0 .../scripts/check-wins-version.ps1 | 0 .../scripts/proxy-entry.ps1 | 0 .../{0.1.0 => 0.1.100}/scripts/run.ps1 | 0 .../0.1.100/templates/_helpers.tpl | 113 + .../templates/configmap.yaml | 0 .../templates/daemonset.yaml | 0 .../0.1.100/templates/prometheusrule.yaml | 13 + .../{0.1.0 => 0.1.100}/templates/rbac.yaml | 0 .../{0.1.0 => 0.1.100}/templates/service.yaml | 0 .../templates/servicemonitor.yaml | 5 +- .../0.1.100}/values.yaml | 6 +- 694 files changed, 15131 insertions(+), 22574 deletions(-) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/.helmignore (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/Chart.yaml (94%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/README.md (96%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/dashboards/custom-dashboard.json (100%) rename 
charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/NOTES.txt (100%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana => rancher-grafana/rancher-grafana/6.11.0}/templates/_helpers.tpl (90%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana => rancher-grafana/rancher-grafana/6.11.0}/templates/_pod.tpl (95%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/clusterrole.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/clusterrolebinding.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/configmap-dashboard-provider.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/configmap.yaml (95%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/dashboards-json-configmap.yaml (100%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana => rancher-grafana/rancher-grafana/6.11.0}/templates/deployment.yaml (83%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/headless-service.yaml (100%) create mode 100644 charts/rancher-grafana/rancher-grafana/6.11.0/templates/hpa.yaml rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/image-renderer-deployment.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/image-renderer-network-policy.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/image-renderer-service.yaml (94%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/ingress.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/nginx-config.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 
6.11.0}/templates/poddisruptionbudget.yaml (100%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana => rancher-grafana/rancher-grafana/6.11.0}/templates/podsecuritypolicy.yaml (71%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/pvc.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/role.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/rolebinding.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/secret-env.yaml (100%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana => rancher-grafana/rancher-grafana/6.11.0}/templates/secret.yaml (62%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/service.yaml (97%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/serviceaccount.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/servicemonitor.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/statefulset.yaml (88%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/tests/test-configmap.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/tests/test-podsecuritypolicy.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/tests/test-role.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/tests/test-rolebinding.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/tests/test-serviceaccount.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 6.11.0}/templates/tests/test.yaml (100%) rename charts/rancher-grafana/rancher-grafana/{6.6.402+up6.6.4 => 
6.11.0}/values.yaml (96%) delete mode 100644 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/LICENSE rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/.helmignore (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/Chart.yaml (90%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/OWNERS (100%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics => rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0}/README.md (56%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/NOTES.txt (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/_helpers.tpl (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/clusterrolebinding.yaml (100%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics => rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0}/templates/deployment.yaml (81%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/kubeconfig-secret.yaml (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/pdb.yaml (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/podsecuritypolicy.yaml (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/psp-clusterrole.yaml (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/psp-clusterrolebinding.yaml (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 
3.2.0}/templates/role.yaml (94%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics => rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0}/templates/rolebinding.yaml (95%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/service.yaml (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/serviceaccount.yaml (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/servicemonitor.yaml (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/stsdiscovery-role.yaml (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/templates/stsdiscovery-rolebinding.yaml (100%) rename charts/rancher-kube-state-metrics/rancher-kube-state-metrics/{2.13.101+up2.13.1 => 3.2.0}/values.yaml (91%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/Chart.yaml (92%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/README.md (100%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/crd-manifest/crd-alertmanagerconfigs.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/crd-manifest/crd-alertmanagers.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/crd-manifest/crd-podmonitors.yaml (98%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/crd-manifest/crd-probes.yaml (60%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/crd-manifest/crd-prometheuses.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/crd-manifest/crd-prometheusrules.yaml (98%) rename 
charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/crd-manifest/crd-servicemonitors.yaml (98%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/crd-manifest/crd-thanosrulers.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/templates/_helpers.tpl (51%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/templates/jobs.yaml (70%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/templates/manifest.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/templates/rbac.yaml (98%) rename charts/rancher-monitoring/rancher-monitoring-crd/{14.5.101+up14.5.0 => 16.6.0}/values.yaml (79%) delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/LICENSE delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/README.md delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/_helpers.tpl delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-clients.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/values.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/README.md delete mode 100644 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/_helpers.tpl delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-clients.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/values.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/README.md delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/_helpers.tpl delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-clients.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/values.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/README.md delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/_helpers.tpl delete mode 100644 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-clients.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/values.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/README.md delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/_helpers.tpl delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-clients.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/values.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/README.md delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/_helpers.tpl delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml delete mode 100644 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-clients.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/values.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/README.md delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/_helpers.tpl delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-clients.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/values.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/README.md delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/_helpers.tpl delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-clients-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-clients.yaml delete mode 100644 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-servicemonitor.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/values.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/README.md delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/_helpers.tpl delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-clients.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/values.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/_helpers.tpl delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/etcd.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-node-rsrc-use.yaml delete mode 100644 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-cluster.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-namespace.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-pod.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-workload.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/nodes.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/persistentvolumesusage.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/pods.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/statefulset.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/alertmanager.rules.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/etcd.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/general.rules.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/k8s.rules.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-apiserver.rules.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml delete mode 100644 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-scheduler.rules.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-absent.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-apps.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-resources.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-storage.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-system.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node-network.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node-time.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node.rules.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/prometheus-operator.yaml delete mode 100644 charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/prometheus.rules.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/CHANGELOG.md (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/CONTRIBUTING.md (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/Chart.yaml (88%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/README.md (91%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/app-README.md (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/Chart.yaml (95%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/README.md (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/dashboards/custom-dashboard.json (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/NOTES.txt (100%) rename charts/{rancher-grafana/rancher-grafana/6.6.402+up6.6.4 => rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana}/templates/_helpers.tpl (90%) rename charts/{rancher-grafana/rancher-grafana/6.6.402+up6.6.4 => rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana}/templates/_pod.tpl (95%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/clusterrole.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/clusterrolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/configmap-dashboard-provider.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/configmap.yaml (95%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/dashboards-json-configmap.yaml (100%) rename charts/{rancher-grafana/rancher-grafana/6.6.402+up6.6.4 => rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana}/templates/deployment.yaml (83%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/headless-service.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/hpa.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/image-renderer-deployment.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/image-renderer-network-policy.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/image-renderer-service.yaml (94%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/ingress.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/nginx-config.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/poddisruptionbudget.yaml (100%) rename charts/{rancher-grafana/rancher-grafana/6.6.402+up6.6.4 => rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana}/templates/podsecuritypolicy.yaml (71%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/pvc.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/role.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/rolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/secret-env.yaml (100%) rename charts/{rancher-grafana/rancher-grafana/6.6.402+up6.6.4 => rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana}/templates/secret.yaml (62%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 
16.6.0}/charts/grafana/templates/service.yaml (97%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/serviceaccount.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/servicemonitor.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/statefulset.yaml (88%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/tests/test-configmap.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/tests/test-podsecuritypolicy.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/tests/test-role.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/tests/test-rolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/tests/test-serviceaccount.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/templates/tests/test.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/grafana/values.yaml (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/k3sServer => 16.6.0/charts/hardenedKubelet}/.helmignore (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/Chart.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmControllerManager => 16.6.0/charts/hardenedKubelet}/README.md (81%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/k3sServer => 16.6.0/charts/hardenedKubelet}/templates/_helpers.tpl (80%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/k3sServer => 16.6.0/charts/hardenedKubelet}/templates/pushprox-clients-rbac.yaml (88%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmControllerManager => 16.6.0/charts/hardenedKubelet}/templates/pushprox-clients.yaml (94%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmEtcd => 16.6.0/charts/hardenedKubelet}/templates/pushprox-proxy-rbac.yaml (90%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/k3sServer => 16.6.0/charts/hardenedKubelet}/templates/pushprox-proxy.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmEtcd => 16.6.0/charts/hardenedKubelet}/templates/pushprox-servicemonitor.yaml (85%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmControllerManager => 16.6.0/charts/hardenedKubelet}/values.yaml (66%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmControllerManager => 16.6.0/charts/hardenedNodeExporter}/.helmignore (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/Chart.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmProxy => 16.6.0/charts/hardenedNodeExporter}/README.md (81%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmEtcd => 16.6.0/charts/hardenedNodeExporter}/templates/_helpers.tpl (80%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmEtcd => 16.6.0/charts/hardenedNodeExporter}/templates/pushprox-clients-rbac.yaml (88%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmEtcd => 16.6.0/charts/hardenedNodeExporter}/templates/pushprox-clients.yaml (94%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/k3sServer => 
16.6.0/charts/hardenedNodeExporter}/templates/pushprox-proxy-rbac.yaml (90%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmControllerManager => 16.6.0/charts/hardenedNodeExporter}/templates/pushprox-proxy.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmControllerManager => 16.6.0/charts/hardenedNodeExporter}/templates/pushprox-servicemonitor.yaml (85%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmEtcd => 16.6.0/charts/hardenedNodeExporter}/values.yaml (66%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmEtcd => 16.6.0/charts/k3sServer}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/k3sServer/Chart.yaml (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmEtcd => 16.6.0/charts/k3sServer}/README.md (81%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmProxy => 16.6.0/charts/k3sServer}/templates/_helpers.tpl (80%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmProxy => 16.6.0/charts/k3sServer}/templates/pushprox-clients-rbac.yaml (88%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/k3sServer/templates/pushprox-clients.yaml (94%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmProxy => 16.6.0/charts/k3sServer}/templates/pushprox-proxy-rbac.yaml (90%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmEtcd => 16.6.0/charts/k3sServer}/templates/pushprox-proxy.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmProxy => 16.6.0/charts/k3sServer}/templates/pushprox-servicemonitor.yaml (85%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 
16.6.0}/charts/k3sServer/values.yaml (66%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/Chart.yaml (90%) rename charts/{rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1 => rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics}/README.md (56%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/NOTES.txt (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/_helpers.tpl (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/clusterrolebinding.yaml (100%) rename charts/{rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1 => rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics}/templates/deployment.yaml (81%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/kubeconfig-secret.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/pdb.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/podsecuritypolicy.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/psp-clusterrole.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/role.yaml (94%) rename 
charts/{rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1 => rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics}/templates/rolebinding.yaml (95%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/serviceaccount.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/servicemonitor.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/stsdiscovery-role.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kube-state-metrics/values.yaml (91%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmProxy => 16.6.0/charts/kubeAdmControllerManager}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kubeAdmControllerManager/Chart.yaml (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/k3sServer => 16.6.0/charts/kubeAdmControllerManager}/README.md (81%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kubeAdmControllerManager/templates/_helpers.tpl (80%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml (88%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmProxy => 16.6.0/charts/kubeAdmControllerManager}/templates/pushprox-clients.yaml (94%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml (90%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmProxy => 16.6.0/charts/kubeAdmControllerManager}/templates/pushprox-proxy.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/k3sServer => 16.6.0/charts/kubeAdmControllerManager}/templates/pushprox-servicemonitor.yaml (85%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmProxy => 16.6.0/charts/kubeAdmControllerManager}/values.yaml (66%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmScheduler => 16.6.0/charts/kubeAdmEtcd}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kubeAdmEtcd/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/kubeAdmScheduler => 16.6.0/charts/kubeAdmEtcd}/templates/pushprox-proxy.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/values.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rke2ControllerManager => 
16.6.0/charts/kubeAdmProxy}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kubeAdmProxy/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rke2ControllerManager => 16.6.0/charts/kubeAdmProxy}/templates/pushprox-proxy.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/values.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rke2Etcd => 16.6.0/charts/kubeAdmScheduler}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/kubeAdmScheduler/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-clients.yaml create mode 100644 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rke2Etcd => 16.6.0/charts/kubeAdmScheduler}/templates/pushprox-proxy.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/values.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/Chart.yaml (95%) rename charts/{rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1 => rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter}/README.md (98%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/NOTES.txt (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/_helpers.tpl (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/certmanager.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/cluster-role-resource-reader.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/configmap.yaml (100%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/custom-metrics-apiservice.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/custom-metrics-cluster-role.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/deployment.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/external-metrics-apiservice.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/external-metrics-cluster-role.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/pdb.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/psp.yaml (95%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/resource-metrics-apiservice.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/resource-metrics-cluster-role.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 
16.6.0}/charts/prometheus-adapter/templates/role-binding-auth-reader.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/secret.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-adapter/templates/serviceaccount.yaml (75%) rename charts/{rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1 => rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter}/values.yaml (93%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/Chart.yaml (97%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/README.md (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/templates/NOTES.txt (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/templates/_helpers.tpl (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/templates/daemonset.yaml (97%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/templates/endpoints.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/templates/monitor.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml (100%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/templates/psp.yaml (92%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/templates/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/prometheus-node-exporter/templates/serviceaccount.yaml (100%) rename charts/{rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2 => rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter}/values.yaml (97%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rke2Proxy => 16.6.0/charts/rke2ControllerManager}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/rke2ControllerManager/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rke2Proxy => 16.6.0/charts/rke2ControllerManager}/templates/pushprox-proxy.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml create mode 100644 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/values.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rke2Scheduler => 16.6.0/charts/rke2Etcd}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/rke2Etcd/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rke2Scheduler => 16.6.0/charts/rke2Etcd}/templates/pushprox-proxy.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/values.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rkeControllerManager => 16.6.0/charts/rke2IngressNginx}/.helmignore (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/Chart.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-clients-rbac.yaml create mode 100644 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-proxy-rbac.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rkeControllerManager => 16.6.0/charts/rke2IngressNginx}/templates/pushprox-proxy.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/values.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rkeEtcd => 16.6.0/charts/rke2Proxy}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/rke2Proxy/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rkeEtcd => 16.6.0/charts/rke2Proxy}/templates/pushprox-proxy.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/values.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rkeProxy => 16.6.0/charts/rke2Scheduler}/.helmignore (100%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/rke2Scheduler/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rkeProxy => 16.6.0/charts/rke2Scheduler}/templates/pushprox-proxy.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/values.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rkeScheduler => 16.6.0/charts/rkeControllerManager}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/rkeControllerManager/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-clients.yaml create mode 100644 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/rkeScheduler => 16.6.0/charts/rkeControllerManager}/templates/pushprox-proxy.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/values.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/charts/windowsExporter => 16.6.0/charts/rkeEtcd}/.helmignore (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/rkeEtcd/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-proxy.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/values.yaml rename charts/{rancher-windows-exporter/rancher-windows-exporter/0.1.0 => rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx}/.helmignore (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/Chart.yaml create mode 100644 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-proxy-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-proxy.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/values.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/.helmignore rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/rkeProxy/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-proxy.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-servicemonitor.yaml create mode 100644 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/values.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/.helmignore rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/rkeScheduler/Chart.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/README.md create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/_helpers.tpl create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-clients.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-proxy.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/values.yaml create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/.helmignore rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/windowsExporter/Chart.yaml (93%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/windowsExporter/README.md (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/windowsExporter/scripts/check-wins-version.ps1 (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/windowsExporter/scripts/proxy-entry.ps1 (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/windowsExporter/scripts/run.ps1 
(100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/_helpers.tpl rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/windowsExporter/templates/configmap.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/windowsExporter/templates/daemonset.yaml (100%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/prometheusrule.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/windowsExporter/templates/rbac.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/windowsExporter/templates/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/charts/windowsExporter/templates/servicemonitor.yaml (88%) rename charts/{rancher-windows-exporter/rancher-windows-exporter/0.1.0 => rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter}/values.yaml (91%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/ingress-nginx/nginx.json (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/ingress-nginx/request-handling-performance.json (98%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/cluster/rancher-cluster-nodes.json (97%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/cluster/rancher-cluster.json (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/home/rancher-default-home.json (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/k8s/rancher-etcd-nodes.json (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 
16.6.0}/files/rancher/k8s/rancher-etcd.json (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/k8s/rancher-k8s-components-nodes.json (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/k8s/rancher-k8s-components.json (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/nodes/rancher-node-detail.json (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/nodes/rancher-node.json (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/pods/rancher-pod-containers.json (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/pods/rancher-pod.json (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/workloads/rancher-workload-pods.json (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/files/rancher/workloads/rancher-workload.json (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/NOTES.txt (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/_helpers.tpl (76%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/alertmanager.yaml (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/cleanupSecret.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/ingress.yaml (70%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/ingressperreplica.yaml (76%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/podDisruptionBudget.yaml (100%) 
rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/psp-role.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/psp-rolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/psp.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/secret.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/serviceaccount.yaml (77%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/servicemonitor.yaml (93%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/alertmanager/serviceperreplica.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/core-dns/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/core-dns/servicemonitor.yaml (89%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-api-server/servicemonitor.yaml (82%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-controller-manager/endpoints.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-controller-manager/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-controller-manager/servicemonitor.yaml (92%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 
16.6.0}/templates/exporters/kube-dns/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-dns/servicemonitor.yaml (93%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-etcd/endpoints.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-etcd/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-etcd/servicemonitor.yaml (93%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-proxy/endpoints.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-proxy/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-proxy/servicemonitor.yaml (91%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-scheduler/endpoints.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-scheduler/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-scheduler/servicemonitor.yaml (92%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kube-state-metrics/serviceMonitor.yaml (79%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/kubelet/servicemonitor.yaml (81%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/exporters/node-exporter/servicemonitor.yaml (64%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/configmap-dashboards.yaml (81%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/configmaps-datasources.yaml (87%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/apiserver.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/cluster-total.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/controller-manager.yaml (93%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/templates/grafana/dashboards => 16.6.0/templates/grafana/dashboards-1.14}/etcd.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/k8s-coredns.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml (82%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/k8s-resources-node.yaml (97%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml => 16.6.0/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml} (82%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml (94%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml (94%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/kubelet.yaml (94%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/namespace-by-pod.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/namespace-by-workload.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/node-rsrc-use.yaml (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/nodes.yaml (97%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml (87%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/pod-total.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/prometheus.yaml (98%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/proxy.yaml (92%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/scheduler.yaml (90%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/statefulset.yaml (97%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/dashboards-1.14/workload-total.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/namespaces.yaml (100%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/grafana/servicemonitor.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml (94%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml (94%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml (93%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml (89%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml (89%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/certmanager.yaml (97%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 
16.6.0}/templates/prometheus-operator/clusterrole.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/clusterrolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/deployment.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/psp-clusterrole.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/psp-clusterrolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/psp.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/service.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/serviceaccount.yaml (71%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus-operator/servicemonitor.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/_rules.tpl (98%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/additionalAlertRelabelConfigs.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/additionalAlertmanagerConfigs.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/additionalPrometheusRules.yaml (89%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/additionalScrapeConfigs.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/clusterrole.yaml (100%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/clusterrolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/ingress.yaml (56%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/ingressThanosSidecar.yaml (70%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/ingressperreplica.yaml (75%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/nginx-config.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/podDisruptionBudget.yaml (95%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/podmonitors.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/prometheus.yaml (93%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/psp-clusterrole.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/psp-clusterrolebinding.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/psp.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/alertmanager.rules.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/etcd.yaml (98%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/general.rules.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/k8s.rules.yaml (72%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kube-state-metrics.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kubelet.rules.yaml (82%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kubernetes-apps.yaml (98%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kubernetes-resources.yaml (91%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kubernetes-storage.yaml (82%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml (99%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml (100%) rename 
charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml (95%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/kubernetes-system.yaml (94%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/node-exporter.rules.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/node-exporter.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/node-network.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/node.rules.yaml (96%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/prometheus-operator.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/rules-1.14/prometheus.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/service.yaml (98%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/serviceThanosSidecar.yaml (96%) create mode 100644 charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceThanosSidecarExternal.yaml rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/serviceaccount.yaml (77%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/servicemonitor.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 
=> 16.6.0}/templates/prometheus/servicemonitors.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/prometheus/serviceperreplica.yaml (97%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/clusterrole.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/config-role.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/dashboard-role.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml (80%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/dashboards/rancher/cluster-dashboards.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/dashboards/rancher/default-dashboard.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/dashboards/rancher/k8s-dashboards.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/dashboards/rancher/nodes-dashboards.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/dashboards/rancher/pods-dashboards.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/dashboards/rancher/workload-dashboards.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml (71%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 
16.6.0}/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml (73%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/rancher-monitoring/hardened.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/templates/validate-install-crd.yaml (100%) rename charts/rancher-monitoring/rancher-monitoring/{14.5.101+up14.5.0 => 16.6.0}/values.yaml (92%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/.helmignore (100%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/Chart.yaml (95%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/OWNERS (100%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/README.md (100%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/ci/port-values.yaml (100%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/templates/NOTES.txt (100%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/templates/_helpers.tpl (100%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/templates/daemonset.yaml (97%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/templates/endpoints.yaml (100%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/templates/monitor.yaml (100%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/templates/psp-clusterrole.yaml (100%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/templates/psp-clusterrolebinding.yaml (100%) rename 
charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/templates/psp.yaml (92%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/templates/service.yaml (100%) rename charts/rancher-node-exporter/rancher-node-exporter/{1.16.201+up1.16.2 => 1.18.100+up1.18.1}/templates/serviceaccount.yaml (100%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter => rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1}/values.yaml (97%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/.helmignore (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/Chart.yaml (93%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter => rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0}/README.md (98%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/ci/default-values.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/ci/external-rules-values.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/NOTES.txt (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/_helpers.tpl (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/certmanager.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/cluster-role-binding-auth-delegator.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/cluster-role-binding-resource-reader.yaml (100%) rename 
charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/cluster-role-resource-reader.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/configmap.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/custom-metrics-apiservice.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/custom-metrics-cluster-role-binding-hpa.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/custom-metrics-cluster-role.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/deployment.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/external-metrics-apiservice.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/external-metrics-cluster-role-binding-hpa.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/external-metrics-cluster-role.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/pdb.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/psp.yaml (95%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/resource-metrics-apiservice.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/resource-metrics-cluster-role-binding.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 
2.14.0}/templates/resource-metrics-cluster-role.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/role-binding-auth-reader.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/secret.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/service.yaml (100%) rename charts/rancher-prometheus-adapter/rancher-prometheus-adapter/{2.12.101+up2.12.1 => 2.14.0}/templates/serviceaccount.yaml (75%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter => rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0}/values.yaml (93%) create mode 100644 charts/rancher-pushprox/rancher-pushprox/0.1.400/.helmignore create mode 100644 charts/rancher-pushprox/rancher-pushprox/0.1.400/Chart.yaml create mode 100644 charts/rancher-pushprox/rancher-pushprox/0.1.400/README.md create mode 100644 charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/_helpers.tpl create mode 100644 charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-clients-rbac.yaml create mode 100644 charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-clients.yaml create mode 100644 charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-proxy-rbac.yaml create mode 100644 charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-proxy.yaml create mode 100644 charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-servicemonitor.yaml create mode 100644 charts/rancher-pushprox/rancher-pushprox/0.1.400/values.yaml delete mode 100644 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/_helpers.tpl create mode 100644 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/.helmignore rename charts/rancher-windows-exporter/rancher-windows-exporter/{0.1.0 => 0.1.100}/Chart.yaml (92%) 
rename charts/rancher-windows-exporter/rancher-windows-exporter/{0.1.0 => 0.1.100}/README.md (100%) rename charts/rancher-windows-exporter/rancher-windows-exporter/{0.1.0 => 0.1.100}/scripts/check-wins-version.ps1 (100%) rename charts/rancher-windows-exporter/rancher-windows-exporter/{0.1.0 => 0.1.100}/scripts/proxy-entry.ps1 (100%) rename charts/rancher-windows-exporter/rancher-windows-exporter/{0.1.0 => 0.1.100}/scripts/run.ps1 (100%) create mode 100644 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/_helpers.tpl rename charts/rancher-windows-exporter/rancher-windows-exporter/{0.1.0 => 0.1.100}/templates/configmap.yaml (100%) rename charts/rancher-windows-exporter/rancher-windows-exporter/{0.1.0 => 0.1.100}/templates/daemonset.yaml (100%) create mode 100644 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/prometheusrule.yaml rename charts/rancher-windows-exporter/rancher-windows-exporter/{0.1.0 => 0.1.100}/templates/rbac.yaml (100%) rename charts/rancher-windows-exporter/rancher-windows-exporter/{0.1.0 => 0.1.100}/templates/service.yaml (100%) rename charts/rancher-windows-exporter/rancher-windows-exporter/{0.1.0 => 0.1.100}/templates/servicemonitor.yaml (88%) rename charts/{rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter => rancher-windows-exporter/rancher-windows-exporter/0.1.100}/values.yaml (91%) diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/.helmignore b/charts/rancher-grafana/rancher-grafana/6.11.0/.helmignore similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/.helmignore rename to charts/rancher-grafana/rancher-grafana/6.11.0/.helmignore diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/Chart.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/Chart.yaml similarity index 94% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/Chart.yaml rename to 
charts/rancher-grafana/rancher-grafana/6.11.0/Chart.yaml index 1840e746cff..d213eafcaf2 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/Chart.yaml +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/Chart.yaml @@ -5,7 +5,7 @@ annotations: catalog.rancher.io/namespace: cattle-monitoring-system catalog.rancher.io/release-name: rancher-grafana apiVersion: v2 -appVersion: 7.4.5 +appVersion: 7.5.8 description: The leading tool for querying and visualizing time series and metrics. home: https://grafana.net icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png @@ -25,4 +25,4 @@ name: rancher-grafana sources: - https://github.com/grafana/grafana type: application -version: 6.6.402+up6.6.4 +version: 6.11.0 diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/README.md b/charts/rancher-grafana/rancher-grafana/6.11.0/README.md similarity index 96% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/README.md rename to charts/rancher-grafana/rancher-grafana/6.11.0/README.md index 957f019ecf7..3d1d73e481a 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/README.md +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/README.md @@ -63,6 +63,7 @@ This version requires Helm >= 3.1.0. | `image.sha` | Image sha (optional) | `2b56f6106ddc376bb46d974230d530754bf65a640dfbc5245191d72d3b49efc6` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `image.pullSecrets` | Image pull secrets | `{}` | +| `service.enabled` | Enable grafana service | `true` | | `service.type` | Kubernetes service type | `ClusterIP` | | `service.port` | Kubernetes port where service is exposed | `80` | | `service.portName` | Name of the port on the service | `service` | @@ -82,7 +83,7 @@ This version requires Helm >= 3.1.0. 
| `ingress.path` | Ingress accepted path | `/` | | `ingress.pathType` | Ingress type of path | `Prefix` | | `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | -| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). | `[]` | +| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` | | `ingress.tls` | Ingress TLS configuration | `[]` | | `resources` | CPU/Memory resource requests/limits | `{}` | | `nodeSelector` | Node labels for pod assignment | `{}` | @@ -157,13 +158,16 @@ This version requires Helm >= 3.1.0. | `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | | `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | | `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. 
| `both` | | `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | | `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | -| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` | +| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` | | `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | | `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | | `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | | `sidecar.notifiers.searchNamespace` | If specified, the sidecar will search for notifiers config-maps (or secrets) inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | | `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | | `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | | `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | @@ -215,6 +219,7 @@ This version requires Helm >= 3.1.0. 
| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | | `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | | `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | +| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` | | `imageRenderer.service.portName` | image-renderer service port name | `'http'` | | `imageRenderer.service.port` | image-renderer service port used by both service and deployment | `8081` | | `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | @@ -242,6 +247,9 @@ ingress: ### Example of extraVolumeMounts +Volume can be type persistentVolumeClaim or hostPath but not both at same time. +If none existingClaim or hostPath argument is givent then type is emptyDir. + ```yaml - extraVolumeMounts: - name: plugins @@ -249,6 +257,10 @@ ingress: subPath: configs/grafana/plugins existingClaim: existing-grafana-claim readOnly: false + - name: dashboards + mountPath: /var/lib/grafana/dashboards + hostPath: /usr/shared/grafana/dashboards + readOnly: false ``` ## Import dashboards diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/dashboards/custom-dashboard.json b/charts/rancher-grafana/rancher-grafana/6.11.0/dashboards/custom-dashboard.json similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/dashboards/custom-dashboard.json rename to charts/rancher-grafana/rancher-grafana/6.11.0/dashboards/custom-dashboard.json diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/NOTES.txt b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/NOTES.txt similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/NOTES.txt rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/NOTES.txt diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/_helpers.tpl b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/_helpers.tpl similarity index 90% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/_helpers.tpl rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/_helpers.tpl index 76ad78876f5..03da0ff33c6 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/_helpers.tpl +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/_helpers.tpl @@ -143,3 +143,16 @@ Return the appropriate apiVersion for rbac. {{- print "rbac.authorization.k8s.io/v1beta1" -}} {{- end -}} {{- end -}} + +{{/* +Looks if there's an existing secret and reuse its password. If not it generates +new password and use it. +*/}} +{{- define "grafana.password" -}} +{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) 
) -}} + {{- if $secret -}} + {{- index $secret "data" "admin-password" -}} + {{- else -}} + {{- (randAlphaNum 40) | b64enc | quote -}} + {{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/_pod.tpl b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/_pod.tpl similarity index 95% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/_pod.tpl rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/_pod.tpl index 2ba9f115ca2..9d19b4a3223 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/_pod.tpl +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/_pod.tpl @@ -100,7 +100,7 @@ initContainers: - name: FOLDER value: "/etc/grafana/provisioning/datasources" - name: RESOURCE - value: "both" + value: {{ quote .Values.sidecar.datasources.resource }} {{- if .Values.sidecar.enableUniqueFilenames }} - name: UNIQUE_FILENAMES value: "{{ .Values.sidecar.enableUniqueFilenames }}" @@ -135,7 +135,7 @@ initContainers: - name: FOLDER value: "/etc/grafana/provisioning/notifiers" - name: RESOURCE - value: "both" + value: {{ quote .Values.sidecar.notifiers.resource }} {{- if .Values.sidecar.enableUniqueFilenames }} - name: UNIQUE_FILENAMES value: "{{ .Values.sidecar.enableUniqueFilenames }}" @@ -184,7 +184,7 @@ containers: - name: FOLDER value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }}" - name: RESOURCE - value: "both" + value: {{ quote .Values.sidecar.dashboards.resource }} {{- if .Values.sidecar.enableUniqueFilenames }} - name: UNIQUE_FILENAMES value: "{{ .Values.sidecar.enableUniqueFilenames }}" @@ -317,14 +317,14 @@ containers: containerPort: 3000 protocol: TCP env: - {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - name: GF_SECURITY_ADMIN_USER valueFrom: secretKeyRef: name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} key: {{ .Values.admin.userKey | default "admin-user" }} {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - name: GF_SECURITY_ADMIN_PASSWORD valueFrom: secretKeyRef: @@ -356,6 +356,14 @@ containers: - name: GF_RENDERING_CALLBACK_URL value: http://{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} {{ end }} + - name: GF_PATHS_DATA + value: {{ (get .Values "grafana.ini").paths.data }} + - name: GF_PATHS_LOGS + value: {{ (get .Values "grafana.ini").paths.logs }} + - name: GF_PATHS_PLUGINS + value: {{ (get .Values "grafana.ini").paths.plugins }} + - name: GF_PATHS_PROVISIONING + value: {{ (get .Values "grafana.ini").paths.provisioning }} {{- range $key, $value := .Values.envValueFrom }} - name: {{ $key | quote }} valueFrom: @@ -483,8 +491,15 @@ volumes: {{- end }} {{- range .Values.extraVolumeMounts }} - name: {{ .name }} + {{- if .existingClaim }} persistentVolumeClaim: claimName: {{ .existingClaim }} + {{- else if .hostPath }} + hostPath: + path: {{ .hostPath }} + {{- else }} + emptyDir: {} + {{- end }} {{- end }} {{- range .Values.extraEmptyDirMounts }} - name: {{ .name }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/clusterrole.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/clusterrole.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/clusterrole.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/clusterrole.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/clusterrolebinding.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/clusterrolebinding.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/clusterrolebinding.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/clusterrolebinding.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/configmap-dashboard-provider.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/configmap-dashboard-provider.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/configmap-dashboard-provider.yaml rename to 
charts/rancher-grafana/rancher-grafana/6.11.0/templates/configmap-dashboard-provider.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/configmap.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/configmap.yaml similarity index 95% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/configmap.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/configmap.yaml index de32b7ab2d2..c72219fb807 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/configmap.yaml +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/configmap.yaml @@ -19,8 +19,10 @@ data: {{- range $elem, $elemVal := $value }} {{- if kindIs "invalid" $elemVal }} {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} {{- else }} - {{ $elem }} = {{ tpl (toYaml $elemVal) $ }} + {{ $elem }} = {{ $elemVal }} {{- end }} {{- end }} {{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/dashboards-json-configmap.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/dashboards-json-configmap.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/dashboards-json-configmap.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/dashboards-json-configmap.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/deployment.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/deployment.yaml similarity index 83% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/deployment.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/deployment.yaml index 4d77794cd9b..1c9ae863816 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/deployment.yaml +++ 
b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/deployment.yaml @@ -14,7 +14,9 @@ metadata: {{ toYaml . | indent 4 }} {{- end }} spec: + {{- if not .Values.autoscaling.enabled }} replicas: {{ .Values.replicas }} + {{- end }} revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} selector: matchLabels: @@ -34,7 +36,7 @@ spec: checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} -{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }} {{- end }} {{- if .Values.envRenderSecret }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/headless-service.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/headless-service.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/headless-service.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/headless-service.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.11.0/templates/hpa.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/hpa.yaml new file mode 100644 index 00000000000..9c186d74ac1 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/hpa.yaml @@ -0,0 +1,20 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "grafana.name" . }} + helm.sh/chart: {{ template "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "grafana.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: +{{ toYaml .Values.autoscaling.metrics | indent 4 }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/image-renderer-deployment.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/image-renderer-deployment.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/image-renderer-deployment.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/image-renderer-deployment.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/image-renderer-network-policy.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/image-renderer-network-policy.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/image-renderer-network-policy.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/image-renderer-network-policy.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/image-renderer-service.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/image-renderer-service.yaml similarity index 94% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/image-renderer-service.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/image-renderer-service.yaml index f5d3eb02f91..f29586c3ac2 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/image-renderer-service.yaml +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/image-renderer-service.yaml @@ -1,4 +1,5 @@ {{ if .Values.imageRenderer.enabled }} +{{ if .Values.imageRenderer.service.enabled }} apiVersion: v1 kind: Service metadata: @@ -26,3 +27,4 @@ spec: selector: {{- include "grafana.imageRenderer.selectorLabels" . 
| nindent 4 }} {{ end }} +{{ end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/ingress.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/ingress.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/ingress.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/ingress.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/nginx-config.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/nginx-config.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/nginx-config.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/nginx-config.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/poddisruptionbudget.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/poddisruptionbudget.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/poddisruptionbudget.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/poddisruptionbudget.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/podsecuritypolicy.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/podsecuritypolicy.yaml similarity index 71% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/podsecuritypolicy.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/podsecuritypolicy.yaml index 19da5079173..f7c5941ab68 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/podsecuritypolicy.yaml +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/podsecuritypolicy.yaml @@ -13,19 +13,8 @@ spec: privileged: false allowPrivilegeEscalation: false requiredDropCapabilities: - # Default set from Docker, without DAC_OVERRIDE or 
CHOWN - - FOWNER - - FSETID - - KILL - - SETGID - - SETUID - - SETPCAP - - NET_BIND_SERVICE - - NET_RAW - - SYS_CHROOT - - MKNOD - - AUDIT_WRITE - - SETFCAP + # Default set from Docker, with DAC_OVERRIDE and CHOWN + - ALL volumes: - 'configMap' - 'emptyDir' @@ -38,12 +27,20 @@ spec: hostIPC: false hostPID: false runAsUser: - rule: 'RunAsAny' + rule: 'MustRunAsNonRoot' seLinux: rule: 'RunAsAny' supplementalGroups: - rule: 'RunAsAny' + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 fsGroup: - rule: 'RunAsAny' + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 readOnlyRootFilesystem: false {{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/pvc.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/pvc.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/pvc.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/pvc.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/role.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/role.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/role.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/role.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/rolebinding.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/rolebinding.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/rolebinding.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/rolebinding.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/secret-env.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/secret-env.yaml similarity index 100% rename from 
charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/secret-env.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/secret-env.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/secret.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/secret.yaml similarity index 62% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/secret.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/secret.yaml index 4fdd817dae5..6d06cf584f4 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/secret.yaml +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/secret.yaml @@ -1,4 +1,4 @@ -{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} apiVersion: v1 kind: Secret metadata: @@ -6,6 +6,10 @@ metadata: namespace: {{ template "grafana.namespace" . }} labels: {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} type: Opaque data: {{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} @@ -13,7 +17,7 @@ data: {{- if .Values.adminPassword }} admin-password: {{ .Values.adminPassword | b64enc | quote }} {{- else }} - admin-password: {{ randAlphaNum 40 | b64enc | quote }} + admin-password: {{ template "grafana.password" . 
}} {{- end }} {{- end }} {{- if not .Values.ldap.existingSecret }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/service.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/service.yaml similarity index 97% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/service.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/service.yaml index 2764566986b..ba84ef97046 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/service.yaml +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/service.yaml @@ -1,3 +1,4 @@ +{{ if .Values.service.enabled }} apiVersion: v1 kind: Service metadata: @@ -47,4 +48,4 @@ spec: {{- end }} selector: {{- include "grafana.selectorLabels" . | nindent 4 }} - +{{ end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/serviceaccount.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/serviceaccount.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/serviceaccount.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/serviceaccount.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/servicemonitor.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/servicemonitor.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/servicemonitor.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/servicemonitor.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/statefulset.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/statefulset.yaml similarity index 88% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/statefulset.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/statefulset.yaml index 55c159c9f5f..802768645a4 100644 --- 
a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/statefulset.yaml +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/statefulset.yaml @@ -27,7 +27,7 @@ spec: checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} - {{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }} {{- end }} {{- with .Values.podAnnotations }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test-configmap.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test-configmap.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test-configmap.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test-configmap.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test-podsecuritypolicy.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test-podsecuritypolicy.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test-podsecuritypolicy.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test-podsecuritypolicy.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test-role.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test-role.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test-role.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test-role.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test-rolebinding.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test-rolebinding.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test-rolebinding.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test-rolebinding.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test-serviceaccount.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test-serviceaccount.yaml similarity index 100% rename from 
charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test-serviceaccount.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test-serviceaccount.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test.yaml similarity index 100% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/tests/test.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/templates/tests/test.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/values.yaml b/charts/rancher-grafana/rancher-grafana/6.11.0/values.yaml similarity index 96% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/values.yaml rename to charts/rancher-grafana/rancher-grafana/6.11.0/values.yaml index 9491c1a1f96..52466a9fcb3 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/values.yaml +++ b/charts/rancher-grafana/rancher-grafana/6.11.0/values.yaml @@ -38,6 +38,22 @@ serviceAccount: replicas: 1 +## Create HorizontalPodAutoscaler object for deployment type +# +autoscaling: + enabled: false +# minReplicas: 1 +# maxReplicas: 10 +# metrics: +# - type: Resource +# resource: +# name: cpu +# targetAverageUtilization: 60 +# - type: Resource +# resource: +# name: memory +# targetAverageUtilization: 60 + ## See `kubectl explain poddisruptionbudget.spec` for more ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ podDisruptionBudget: {} @@ -69,7 +85,7 @@ livenessProbe: image: repository: rancher/mirrored-grafana-grafana - tag: 7.4.5 + tag: 7.5.8 sha: "" pullPolicy: IfNotPresent @@ -119,7 +135,7 @@ extraLabels: {} downloadDashboardsImage: repository: rancher/mirrored-curlimages-curl - tag: 7.73.0 + tag: 7.77.0 sha: "" pullPolicy: IfNotPresent @@ -144,6 +160,7 @@ podPortName: grafana ## ref: http://kubernetes.io/docs/user-guide/services/ ## service: + enabled: true type: ClusterIP 
port: 80 targetPort: 3000 @@ -420,10 +437,14 @@ extraSecretMounts: [] ## Additional grafana server volume mounts # Defines additional volume mounts. extraVolumeMounts: [] - # - name: extra-volume - # mountPath: /mnt/volume + # - name: extra-volume-0 + # mountPath: /mnt/volume0 # readOnly: true # existingClaim: volume-claim + # - name: extra-volume-1 + # mountPath: /mnt/volume1 + # readOnly: true + # hostPath: /usr/shared/ ## Pass the plugins you want installed as a list. ## @@ -530,7 +551,7 @@ dashboardsConfigMaps: {} ## grafana.ini: paths: - data: /var/lib/grafana/data + data: /var/lib/grafana/ logs: /var/log/grafana plugins: /var/lib/grafana/plugins provisioning: /etc/grafana/provisioning @@ -601,7 +622,7 @@ smtp: sidecar: image: repository: rancher/mirrored-kiwigrid-k8s-sidecar - tag: 1.10.7 + tag: 1.12.2 sha: "" imagePullPolicy: IfNotPresent resources: {} @@ -629,6 +650,8 @@ sidecar: # Otherwise the namespace in which the sidecar is running will be used. # It's also possible to specify ALL to search in all namespaces searchNamespace: null + # search in configmap, secret or both + resource: both # If specified, the sidecar will look for annotation with this name to create folder and put graph here. # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. folderAnnotation: null @@ -658,10 +681,8 @@ sidecar: # Otherwise the namespace in which the sidecar is running will be used. # It's also possible to specify ALL to search in all namespaces searchNamespace: null - - ## The name of a secret in the same kubernetes namespace which contain values to be added to the environment - ## This can be useful for database passwords, etc. Value is templated. 
- envFromSecret: "" + # search in configmap, secret or both + resource: both notifiers: enabled: false # label that the configmaps with notifiers are marked with @@ -670,6 +691,8 @@ sidecar: # Otherwise the namespace in which the sidecar is running will be used. # It's also possible to specify ALL to search in all namespaces searchNamespace: null + # search in configmap, secret or both + resource: both ## Override the deployment namespace ## @@ -688,7 +711,7 @@ imageRenderer: # image-renderer Image repository repository: rancher/mirrored-grafana-grafana-image-renderer # image-renderer Image tag - tag: 2.0.1 + tag: 3.0.1 # image-renderer Image sha (optional) sha: "" # image-renderer ImagePullPolicy @@ -707,6 +730,8 @@ imageRenderer: # image-renderer deployment priority class priorityClassName: '' service: + # Enable the image-renderer service + enabled: true # image-renderer service port name portName: 'http' # image-renderer service port used by both service and deployment diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/LICENSE b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/LICENSE deleted file mode 100644 index 393b7a33b5c..00000000000 --- a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright The Helm Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/.helmignore b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/.helmignore similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/.helmignore rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/.helmignore diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/Chart.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/Chart.yaml similarity index 90% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/Chart.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/Chart.yaml index 524918a9de6..2b7cac49b1b 100644 --- a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/Chart.yaml +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/Chart.yaml @@ -4,8 +4,8 @@ annotations: catalog.rancher.io/certified: rancher catalog.rancher.io/namespace: cattle-monitoring-system catalog.rancher.io/release-name: rancher-kube-state-metrics -apiVersion: v1 -appVersion: 1.9.8 +apiVersion: v2 +appVersion: 2.0.0 description: Install kube-state-metrics to generate and expose cluster-level metrics home: https://github.com/kubernetes/kube-state-metrics/ keywords: @@ -21,4 
+21,5 @@ maintainers: name: rancher-kube-state-metrics sources: - https://github.com/kubernetes/kube-state-metrics/ -version: 2.13.101+up2.13.1 +type: application +version: 3.2.0 diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/OWNERS b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/OWNERS similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/OWNERS rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/OWNERS diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/README.md b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/README.md similarity index 56% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/README.md rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/README.md index e93a3d25245..7c2e16918f3 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/README.md +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/README.md @@ -5,7 +5,7 @@ Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state ## Get Repo Info ```console -helm repo add kube-state-metrics https://kubernetes.github.io/kube-state-metrics +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts helm repo update ``` @@ -14,11 +14,7 @@ _See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation ## Install Chart ```console -# Helm 3 -$ helm install [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] - -# Helm 2 -$ helm install --name [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] +helm install [RELEASE_NAME] prometheus-community/kube-state-metrics [flags] ``` _See [configuration](#configuration) below._ @@ -28,11 +24,7 @@ _See [helm 
install](https://helm.sh/docs/helm/helm_install/) for command documen ## Uninstall Chart ```console -# Helm 3 -$ helm uninstall [RELEASE_NAME] - -# Helm 2 -# helm delete --purge [RELEASE_NAME] +helm uninstall [RELEASE_NAME] ``` This removes all the Kubernetes components associated with the chart and deletes the release. @@ -42,25 +34,35 @@ _See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command doc ## Upgrading Chart ```console -# Helm 3 or 2 -$ helm upgrade [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] +helm upgrade [RELEASE_NAME] prometheus-community/kube-state-metrics [flags] ``` _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ -### From stable/kube-state-metrics +### Migrating from stable/kube-state-metrics and kubernetes/kube-state-metrics You can upgrade in-place: 1. [get repo info](#get-repo-info) 1. [upgrade](#upgrading-chart) your existing release name using the new chart repo + +## Upgrading to v3.0.0 + +v3.0.0 includes kube-state-metrics v2.0, see the [changelog](https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/CHANGELOG.md) for major changes on the application-side. + +The upgraded chart now the following changes: +* Dropped support for helm v2 (helm v3 or later is required) +* collectors key was renamed to resources +* namespace key was renamed to namespaces + + ## Configuration See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments: ```console -helm show values kube-state-metrics/kube-state-metrics +helm show values prometheus-community/kube-state-metrics ``` -You may also `helm show values` on this chart's [dependencies](#dependencies) for additional options. +You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options. 
diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/NOTES.txt b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/NOTES.txt similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/NOTES.txt rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/NOTES.txt diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/_helpers.tpl b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/_helpers.tpl similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/_helpers.tpl rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/_helpers.tpl diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/clusterrolebinding.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/clusterrolebinding.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/clusterrolebinding.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/clusterrolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/deployment.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/deployment.yaml similarity index 81% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/deployment.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/deployment.yaml index 4ab55291b10..f338308ad9e 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/deployment.yaml +++ 
b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/deployment.yaml @@ -70,91 +70,91 @@ spec: {{- end }} {{ end }} {{ if .Values.collectors.certificatesigningrequests }} - - --collectors=certificatesigningrequests + - --resources=certificatesigningrequests {{ end }} {{ if .Values.collectors.configmaps }} - - --collectors=configmaps + - --resources=configmaps {{ end }} {{ if .Values.collectors.cronjobs }} - - --collectors=cronjobs + - --resources=cronjobs {{ end }} {{ if .Values.collectors.daemonsets }} - - --collectors=daemonsets + - --resources=daemonsets {{ end }} {{ if .Values.collectors.deployments }} - - --collectors=deployments + - --resources=deployments {{ end }} {{ if .Values.collectors.endpoints }} - - --collectors=endpoints + - --resources=endpoints {{ end }} {{ if .Values.collectors.horizontalpodautoscalers }} - - --collectors=horizontalpodautoscalers + - --resources=horizontalpodautoscalers {{ end }} {{ if .Values.collectors.ingresses }} - - --collectors=ingresses + - --resources=ingresses {{ end }} {{ if .Values.collectors.jobs }} - - --collectors=jobs + - --resources=jobs {{ end }} {{ if .Values.collectors.limitranges }} - - --collectors=limitranges + - --resources=limitranges {{ end }} {{ if .Values.collectors.mutatingwebhookconfigurations }} - - --collectors=mutatingwebhookconfigurations + - --resources=mutatingwebhookconfigurations {{ end }} {{ if .Values.collectors.namespaces }} - - --collectors=namespaces + - --resources=namespaces {{ end }} {{ if .Values.collectors.networkpolicies }} - - --collectors=networkpolicies + - --resources=networkpolicies {{ end }} {{ if .Values.collectors.nodes }} - - --collectors=nodes + - --resources=nodes {{ end }} {{ if .Values.collectors.persistentvolumeclaims }} - - --collectors=persistentvolumeclaims + - --resources=persistentvolumeclaims {{ end }} {{ if .Values.collectors.persistentvolumes }} - - --collectors=persistentvolumes + - --resources=persistentvolumes {{ end }} {{ if 
.Values.collectors.poddisruptionbudgets }} - - --collectors=poddisruptionbudgets + - --resources=poddisruptionbudgets {{ end }} {{ if .Values.collectors.pods }} - - --collectors=pods + - --resources=pods {{ end }} {{ if .Values.collectors.replicasets }} - - --collectors=replicasets + - --resources=replicasets {{ end }} {{ if .Values.collectors.replicationcontrollers }} - - --collectors=replicationcontrollers + - --resources=replicationcontrollers {{ end }} {{ if .Values.collectors.resourcequotas }} - - --collectors=resourcequotas + - --resources=resourcequotas {{ end }} {{ if .Values.collectors.secrets }} - - --collectors=secrets + - --resources=secrets {{ end }} {{ if .Values.collectors.services }} - - --collectors=services + - --resources=services {{ end }} {{ if .Values.collectors.statefulsets }} - - --collectors=statefulsets + - --resources=statefulsets {{ end }} {{ if .Values.collectors.storageclasses }} - - --collectors=storageclasses + - --resources=storageclasses {{ end }} {{ if .Values.collectors.validatingwebhookconfigurations }} - - --collectors=validatingwebhookconfigurations + - --resources=validatingwebhookconfigurations {{ end }} {{ if .Values.collectors.verticalpodautoscalers }} - - --collectors=verticalpodautoscalers + - --resources=verticalpodautoscalers {{ end }} {{ if .Values.collectors.volumeattachments }} - - --collectors=volumeattachments + - --resources=volumeattachments {{ end }} -{{ if .Values.namespace }} - - --namespace={{ .Values.namespace | join "," }} +{{ if .Values.namespaces }} + - --namespaces={{ tpl .Values.namespaces $ | join "," }} {{ end }} {{ if .Values.autosharding.enabled }} - --pod=$(POD_NAME) @@ -177,6 +177,9 @@ spec: image: "{{ template "system_default_registry" . 
}}{{ .Values.image.repository }}:{{ .Values.image.tag }}" ports: - containerPort: 8080 +{{- if .Values.selfMonitor.enabled }} + - containerPort: 8081 +{{- end }} livenessProbe: httpGet: path: /healthz @@ -193,6 +196,10 @@ spec: resources: {{ toYaml .Values.resources | indent 10 }} {{- end }} +{{- if .Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 10 }} +{{- end }} {{- if .Values.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.imagePullSecrets | indent 8 }} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/kubeconfig-secret.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/kubeconfig-secret.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/kubeconfig-secret.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/kubeconfig-secret.yaml diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/pdb.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/pdb.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/pdb.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/pdb.yaml diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/podsecuritypolicy.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/podsecuritypolicy.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/podsecuritypolicy.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/podsecuritypolicy.yaml diff --git 
a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/psp-clusterrole.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/psp-clusterrole.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/psp-clusterrole.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/psp-clusterrole.yaml diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/psp-clusterrolebinding.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/psp-clusterrolebinding.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/psp-clusterrolebinding.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/psp-clusterrolebinding.yaml diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/role.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/role.yaml similarity index 94% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/role.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/role.yaml index 6259d2f6175..25c8bc89337 100644 --- a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/role.yaml +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/role.yaml @@ -1,11 +1,8 @@ -{{- if and (eq $.Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} -{{- if eq .Values.rbac.useClusterRole false }} -{{- range (split "," $.Values.namespace) }} -{{- end }} -{{- end -}} +{{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} +{{- range (split "," .Values.namespaces) }} --- apiVersion: 
rbac.authorization.k8s.io/v1 -{{- if eq .Values.rbac.useClusterRole false }} +{{- if eq $.Values.rbac.useClusterRole false }} kind: Role {{- else }} kind: ClusterRole @@ -17,7 +14,7 @@ metadata: app.kubernetes.io/managed-by: {{ $.Release.Service }} app.kubernetes.io/instance: {{ $.Release.Name }} name: {{ template "kube-state-metrics.fullname" $ }} -{{- if eq .Values.rbac.useClusterRole false }} +{{- if eq $.Values.rbac.useClusterRole false }} namespace: {{ . }} {{- end }} rules: @@ -190,3 +187,4 @@ rules: verbs: ["list", "watch"] {{ end -}} {{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/rolebinding.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/rolebinding.yaml similarity index 95% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/rolebinding.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/rolebinding.yaml index 732174a3340..72a1a2e904c 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/rolebinding.yaml +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/rolebinding.yaml @@ -1,5 +1,5 @@ {{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} -{{- range (split "," $.Values.namespace) }} +{{- range (split "," $.Values.namespaces) }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/service.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/service.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/service.yaml rename to 
charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/service.yaml diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/serviceaccount.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/serviceaccount.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/serviceaccount.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/serviceaccount.yaml diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/servicemonitor.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/servicemonitor.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/servicemonitor.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/servicemonitor.yaml diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/stsdiscovery-role.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/stsdiscovery-role.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/stsdiscovery-role.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/stsdiscovery-role.yaml diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/stsdiscovery-rolebinding.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/stsdiscovery-rolebinding.yaml similarity index 100% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/stsdiscovery-rolebinding.yaml rename to 
charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/templates/stsdiscovery-rolebinding.yaml diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/values.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/values.yaml similarity index 91% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/values.yaml rename to charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/values.yaml index f64645690e6..052e534de3f 100644 --- a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/values.yaml +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/3.2.0/values.yaml @@ -6,7 +6,7 @@ global: prometheusScrape: true image: repository: rancher/mirrored-kube-state-metrics-kube-state-metrics - tag: v1.9.8 + tag: v2.0.0 pullPolicy: IfNotPresent imagePullSecrets: [] @@ -93,6 +93,11 @@ securityContext: runAsUser: 65534 fsGroup: 65534 +## Specify security settings for a Container +## Allows overrides and additional options compared to (Pod) securityContext +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +containerSecurityContext: {} + ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} @@ -115,7 +120,7 @@ podAnnotations: {} podDisruptionBudget: {} # Available collectors for kube-state-metrics. By default all available -# collectors are enabled. +# resources are enabled. collectors: certificatesigningrequests: true configmaps: true @@ -152,8 +157,8 @@ kubeconfig: # base64 encoded kube-config file secret: -# Namespace to be enabled for collecting resources. By default all namespaces are collected. -# namespace: "" +# Comma-separated list of namespaces to be enabled for collecting resources. By default all namespaces are collected. 
+namespaces: "" ## Override the deployment namespace ## @@ -177,7 +182,7 @@ resources: {} kubeTargetVersionOverride: "" # Enable self metrics configuration for service and Service Monitor -# Default values for telemetry configuration can be overriden +# Default values for telemetry configuration can be overridden selfMonitor: enabled: false # telemetryHost: 0.0.0.0 diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/Chart.yaml similarity index 92% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/Chart.yaml index 2f1bd9e2981..81670d051a6 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/Chart.yaml @@ -7,4 +7,4 @@ apiVersion: v1 description: Installs the CRDs for rancher-monitoring. name: rancher-monitoring-crd type: application -version: 14.5.101+up14.5.0 +version: 16.6.0 diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/README.md b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/README.md similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/README.md rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/README.md diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-alertmanagerconfigs.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-alertmanagerconfigs.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-alertmanagerconfigs.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-alertmanagerconfigs.yaml index b2ed161866f..a6988a7d3e0 100644 --- 
a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-alertmanagerconfigs.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-alertmanagerconfigs.yaml @@ -1,4 +1,4 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.48.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml --- apiVersion: apiextensions.k8s.io/v1 diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-alertmanagers.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-alertmanagers.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-alertmanagers.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-alertmanagers.yaml index 724d488b0fd..486ee7ee59d 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-alertmanagers.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-alertmanagers.yaml @@ -1,4 +1,4 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.48.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml --- apiVersion: apiextensions.k8s.io/v1 diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-podmonitors.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-podmonitors.yaml similarity index 98% rename from 
charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-podmonitors.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-podmonitors.yaml index d474a0c0da9..c16e955c991 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-podmonitors.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-podmonitors.yaml @@ -1,4 +1,4 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.48.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml --- apiVersion: apiextensions.k8s.io/v1 @@ -161,7 +161,7 @@ spec: description: ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. type: string relabelings: - description: 'RelabelConfigs to apply to samples before ingestion. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + description: 'RelabelConfigs to apply to samples before scraping. Prometheus Operator automatically adds relabelings for a few standard Kubernetes fields and replaces original scrape job name with __tmp_prometheus_job_name. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' items: description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. 
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-probes.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-probes.yaml similarity index 60% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-probes.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-probes.yaml index 7fd658e1427..4b3f92c6dd6 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-probes.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-probes.yaml @@ -1,4 +1,4 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.48.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml --- apiVersion: apiextensions.k8s.io/v1 @@ -35,6 +35,55 @@ spec: spec: description: Specification of desired Ingress selection for target discovery by Prometheus. properties: + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over basic authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint' + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping targets. The secret needs to be in the same namespace as the probe and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object interval: description: Interval at which targets are probed using the configured prober. If not specified Prometheus' global scrape interval is used. type: string @@ -188,6 +237,99 @@ spec: type: array type: object type: object + tlsConfig: + description: TLS configuration to use when scraping the endpoint. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object type: object required: - spec diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-prometheuses.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-prometheuses.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-prometheuses.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-prometheuses.yaml index c3f13d98177..cea3585f658 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-prometheuses.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-prometheuses.yaml @@ -1,4 +1,4 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.48.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml --- apiVersion: apiextensions.k8s.io/v1 @@ -1467,6 
+1467,11 @@ spec: enableAdminAPI: description: 'Enable access to prometheus web admin API. Defaults to the value of `false`. WARNING: Enabling the admin APIs enables mutating endpoints, to delete data, shutdown Prometheus, and more. Enabling this should be done with care and the user is advised to add additional authentication authorization via a proxy to ensure only clients authorized to perform these actions can do so. For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis' type: boolean + enableFeatures: + description: Enable access to Prometheus disabled features. By default, no features are enabled. Enabling disabled features is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice. For more information see https://prometheus.io/docs/prometheus/latest/disabled_features/ + items: + type: string + type: array enforcedNamespaceLabel: description: EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created. The label value will always be the namespace of the object that is being created. type: string @@ -2465,7 +2470,7 @@ spec: type: object type: object bearerToken: - description: bearer token for remote read. + description: Bearer token for remote read. type: string bearerTokenFile: description: File to read bearer token for remote read. @@ -2636,7 +2641,7 @@ spec: type: object type: object bearerToken: - description: File to read bearer token for remote write. + description: Bearer token for remote write. type: string bearerTokenFile: description: File to read bearer token for remote write. @@ -2646,6 +2651,16 @@ spec: type: string description: Custom HTTP headers to be sent along with each remote write request. Be aware that headers that are set by Prometheus itself can't be overwritten. Only valid in Prometheus versions 2.25.0 and newer. 
type: object + metadataConfig: + description: MetadataConfig configures the sending of series metadata to remote storage. + properties: + send: + description: Whether metric metadata is sent to remote storage or not. + type: boolean + sendInterval: + description: How frequently metric metadata is sent to remote storage. + type: string + type: object name: description: The name of the remote write queue, must be unique if specified. The name is used in metrics and logging in order to differentiate queues. Only valid in Prometheus versions 2.15.0 and newer. type: string diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-prometheusrules.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-prometheusrules.yaml similarity index 98% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-prometheusrules.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-prometheusrules.yaml index 07a24df450c..1c33519d7f0 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-prometheusrules.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-prometheusrules.yaml @@ -1,4 +1,4 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.48.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml --- apiVersion: apiextensions.k8s.io/v1 diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-servicemonitors.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-servicemonitors.yaml similarity index 98% rename from 
charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-servicemonitors.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-servicemonitors.yaml index 9dee64ff98d..1c8a4a3541c 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-servicemonitors.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-servicemonitors.yaml @@ -1,4 +1,4 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.48.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml --- apiVersion: apiextensions.k8s.io/v1 @@ -149,7 +149,7 @@ spec: description: ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. type: string relabelings: - description: 'RelabelConfigs to apply to samples before scraping. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + description: 'RelabelConfigs to apply to samples before scraping. Prometheus Operator automatically adds relabelings for a few standard Kubernetes fields and replaces original scrape job name with __tmp_prometheus_job_name. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' items: description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. 
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' properties: diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-thanosrulers.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-thanosrulers.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-thanosrulers.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-thanosrulers.yaml index a470d4b9faf..9112650dd49 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/crd-manifest/crd-thanosrulers.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/crd-manifest/crd-thanosrulers.yaml @@ -1,4 +1,4 @@ -# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.48.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml --- apiVersion: apiextensions.k8s.io/v1 @@ -3339,4 +3339,4 @@ status: kind: "" plural: "" conditions: [] - storedVersions: [] \ No newline at end of file + storedVersions: [] diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/_helpers.tpl similarity index 51% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/_helpers.tpl rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/_helpers.tpl index 2da79e70f5f..edac2b315ef 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/_helpers.tpl +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/_helpers.tpl @@ -26,4 +26,25 @@ beta.kubernetes.io/os: linux {{- else -}} 
kubernetes.io/os: linux {{- end -}} +{{- end -}} + +# CRD Installation + +{{- define "crd.established" -}} +{{- if not (regexMatch "^([a-zA-Z]+[.][a-zA-Z]*)+$" .) -}} +{{ required (printf "%s is not a valid CRD" .) "" }} +{{- else -}} +echo "beginning wait for {{ . }} to be established..."; +num_tries=1; +until kubectl get crd {{ . }} -o=jsonpath='{range .status.conditions[*]}{.type}={.status} {end}' | grep -qE 'Established=True'; do + if (( num_tries == 30 )); then + echo "timed out waiting for {{ . }}"; + exit 1; + fi; + num_tries=$(( num_tries + 1 )); + echo "{{ . }} is not established. Sleeping for 2 seconds and trying again..."; + sleep 2; +done; +echo "successfully established {{ . }}"; +{{- end -}} {{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/jobs.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/jobs.yaml similarity index 70% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/jobs.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/jobs.yaml index 210e4ab3a91..6167ddbe223 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/jobs.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/jobs.yaml @@ -17,8 +17,8 @@ spec: spec: serviceAccountName: {{ .Chart.Name }}-manager securityContext: - runAsNonRoot: true - runAsUser: 1000 + runAsNonRoot: false + runAsUser: 0 initContainers: - name: set-preserve-unknown-fields-false image: {{ template "system_default_registry" . 
}}{{ .Values.image.repository }}:{{ .Values.image.tag }} @@ -30,11 +30,14 @@ spec: {{- range $path, $_ := (.Files.Glob "crd-manifest/**.yaml") }} {{- $crd := get (get ($.Files.Get $path | fromYaml) "metadata") "name" }} if [[ -n "$(kubectl get crd {{ $crd }} -o jsonpath='{.spec.preserveUnknownFields}')" ]]; then - patch="{\"spec\": {\"preserveUnknownFields\": false}}" - if [[ -z "$(kubectl get crd {{ $crd }} -o jsonpath='{.spec.versions[0].schema}' 2>&1)" ]]; then - patch="{\"spec\": {\"preserveUnknownFields\": false, \"versions\": [{\"name\": \"v1\", \"served\": false, \"storage\": true}]}}" + patch='{"spec": {"preserveUnknownFields": false}}'; + if [[ -z "$(kubectl get crd {{ $crd }} -o jsonpath='{.spec.versions[0].schema}')" ]]; then + patch='{"spec": {"preserveUnknownFields": false, "versions": [{"name": "v1", "served": false, "storage": true, "schema": {"openAPIV3Schema": {"description": "placeholder", "type": "object"}}}]}}'; fi - kubectl patch crd {{ $crd }} -p "${patch}" --type="merge"; + echo "Applying patch to {{ $crd }}: ${patch}" + if kubectl patch crd {{ $crd }} -p "${patch}" --type="merge"; then + {{- include "crd.established" $crd | nindent 18 }} + fi; fi; {{- end }} containers: @@ -45,7 +48,20 @@ spec: - /bin/sh - -c - > - kubectl apply -f /etc/config/crd-manifest.yaml + echo "Applying CRDs..."; + kubectl apply -f /etc/config/crd-manifest.yaml; + + echo "Waiting for CRDs to be recognized before finishing installation..."; + + {{- range $path, $_ := (.Files.Glob "crd-manifest/**.yaml") }} + {{- $apiGroup := get (get ($.Files.Get $path | fromYaml) "spec") "group" }} + rm -rf $HOME/.kube/cache/discovery/*/{{ $apiGroup }}; + {{- end }} + + {{- range $path, $_ := (.Files.Glob "crd-manifest/**.yaml") }} + {{- $crd := get (get ($.Files.Get $path | fromYaml) "metadata") "name" }} + {{- include "crd.established" $crd | nindent 12 }} + {{- end }} volumeMounts: - name: crd-manifest readOnly: true @@ -77,8 +93,8 @@ spec: spec: serviceAccountName: {{ .Chart.Name 
}}-manager securityContext: - runAsNonRoot: true - runAsUser: 1000 + runAsNonRoot: false + runAsUser: 0 initContainers: - name: remove-finalizers image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} @@ -89,7 +105,9 @@ spec: - > {{- range $path, $_ := (.Files.Glob "crd-manifest/**.yaml") }} {{- $crd := get (get ($.Files.Get $path | fromYaml) "metadata") "name" }} - kubectl patch crd {{ $crd }} -p '{"metadata": {"finalizers": []}}' || true; + if kubectl patch crd {{ $crd }} -p '{"metadata": {"finalizers": []}}'; then + {{- include "crd.established" $crd | nindent 14 }} + fi; {{- end }} volumeMounts: - name: crd-manifest diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/manifest.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/manifest.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/manifest.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/manifest.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/rbac.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/rbac.yaml similarity index 98% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/rbac.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/rbac.yaml index bdda1ddadf9..1c07c7dd047 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/templates/rbac.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/templates/rbac.yaml @@ -53,7 +53,7 @@ spec: hostIPC: false hostPID: false runAsUser: - rule: 'MustRunAsNonRoot' + rule: 'RunAsAny' seLinux: rule: 'RunAsAny' supplementalGroups: diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/values.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/values.yaml 
similarity index 79% rename from charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/values.yaml rename to charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/values.yaml index 8f3ebe37ab5..129d1391414 100644 --- a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.101+up14.5.0/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring-crd/16.6.0/values.yaml @@ -7,5 +7,5 @@ global: systemDefaultRegistry: "" image: - repository: rancher/rancher-agent - tag: v2.5.7 + repository: rancher/shell + tag: v0.1.8 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/LICENSE b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/LICENSE deleted file mode 100644 index 393b7a33b5c..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright The Helm Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/README.md deleted file mode 100644 index dcecc69daef..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# rancher-pushprox - -A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. - -Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. - -Using an instance of this chart is suitable for the following scenarios: -- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) -- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. 
scraping `kube-proxy` metrics) -- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` -- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) -- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) - -The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. - -## Configuration - -The following tables list the configurable parameters of the rancher-pushprox chart and their default values. - -### General - -#### Required -| Parameter | Description | Example | -| ----- | ----------- | ------ | -| `component` | The component that is being monitored | `kube-etcd` -| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | - -#### Optional -| Parameter | Description | Default | -| ----- | ----------- | ------ | -| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | -| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | -| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | -| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | -| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | -| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | -| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | -| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | -| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | -| `clients.resources` | Set resource limits and requests for the client container | `{}` | -| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | -| `clients.tolerations` | Specify tolerations for clients | `[]` | -| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | -| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | -| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | -| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | -| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | - -*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. - -See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/_helpers.tpl deleted file mode 100644 index f77b8edf4f1..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/_helpers.tpl +++ /dev/null @@ -1,87 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# Windows Support - -{{/* -Windows cluster will add default taint for linux nodes, -add below linux tolerations to workloads could be scheduled to those linux nodes -*/}} - -{{- define "linux-node-tolerations" -}} -- key: "cattle.io/os" - value: "linux" - effect: "NoSchedule" - operator: "Equal" -{{- end -}} - -{{- define "linux-node-selector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: linux -{{- else -}} -kubernetes.io/os: linux -{{- end -}} -{{- end -}} - -# General - -{{- define "pushprox.namespace" -}} - {{- if .Values.namespaceOverride -}} - {{- .Values.namespaceOverride -}} - {{- else -}} - {{- .Release.Namespace -}} - {{- end -}} -{{- end -}} - -{{- define "pushProxy.commonLabels" -}} -release: {{ .Release.Name }} -component: {{ .Values.component | quote }} -provider: kubernetes -{{- end -}} - -{{- define "pushProxy.proxyUrl" -}} -{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} -{{- if .Values.clients.proxyUrl -}} -{{ printf "%s" .Values.clients.proxyUrl }} -{{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} -{{- end -}}{{- end -}} - -# Client - -{{- define "pushProxy.client.name" -}} -{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.client.labels" -}} -k8s-app: {{ template "pushProxy.client.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# Proxy - -{{- define "pushProxy.proxy.name" -}} -{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.proxy.labels" -}} -k8s-app: {{ template "pushProxy.proxy.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# ServiceMonitor - -{{- define "pushprox.serviceMonitor.name" -}} -{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.serviceMonitor.labels" -}} -app: {{ template "pushprox.serviceMonitor.name" . }} -release: {{ .Release.Name | quote }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml deleted file mode 100644 index 95346dee645..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml +++ /dev/null @@ -1,74 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.client.name" . 
}} -{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} -- nonResourceURLs: ["/metrics"] - verbs: ["get"] -{{- end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.client.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - privileged: false - hostNetwork: true - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - - 'emptyDir' - - 'hostPath' - allowedHostPaths: - - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - readOnly: true -{{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-clients.yaml deleted file mode 100644 index ed78792e5d9..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-clients.yaml +++ /dev/null @@ -1,135 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} - pushprox-exporter: "client" -spec: - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} - template: - metadata: - labels: {{ include "pushProxy.client.labels" . | nindent 8 }} - spec: - nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector | indent 8 }} -{{- end }} - tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} -{{- if .Values.clients.tolerations }} -{{ toYaml .Values.clients.tolerations | indent 8 }} -{{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ template "pushProxy.client.name" . }} - containers: - - name: pushprox-client - image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} - command: - {{- range .Values.clients.command }} - - {{ . | quote }} - {{- end }} - args: - - --fqdn=$(HOST_IP) - - --proxy-url=$(PROXY_URL) - - --metrics-addr=$(PORT) - - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} - {{- if .Values.clients.useLocalhost }} - - --use-localhost - {{- end }} - {{- if .Values.clients.https.enabled }} - {{- if .Values.clients.https.insecureSkipVerify }} - - --insecure-skip-verify - {{- end }} - {{- if .Values.clients.https.useServiceAccountCredentials }} - - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- end }} - {{- if .Values.clients.https.certDir }} - - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem - - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem - - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem - {{- end }} - {{- end }} - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PORT - value: :{{ .Values.clients.port }} - - name: PROXY_URL - value: {{ template "pushProxy.proxyUrl" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - volumeMounts: - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - {{- end }} - {{- if .Values.clients.resources }} - resources: {{ toYaml .Values.clients.resources | nindent 10 }} - {{- end }} - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - initContainers: - - name: copy-certs - image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} - command: - - sh - - -c - - | - echo "Searching for files to copy within the source volume" - echo "cert: ${CERT_FILE_NAME}" - echo "key: ${KEY_FILE_NAME}" - echo "cacert: ${CACERT_FILE_NAME}" - - CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) - KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) - CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) - - test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 - test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 - test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 - - echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" - cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 - chmod 444 $CERT_FILE_TARGET || exit 1 - - echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" - cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 - chmod 444 $KEY_FILE_TARGET || exit 1 - - echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" - cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 - chmod 444 $CACERT_FILE_TARGET || exit 1 - env: - - name: CERT_FILE_NAME - value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} - - name: KEY_FILE_NAME - value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} - - name: CACERT_FILE_NAME - value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} - - name: CERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy.pem - - name: KEY_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-key.pem - - name: CACERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem - securityContext: - 
runAsNonRoot: false - volumeMounts: - - name: metrics-cert-dir-source - mountPath: /etc/source - readOnly: true - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - volumes: - - name: metrics-cert-dir-source - hostPath: - path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - - name: metrics-cert-dir - emptyDir: {} - {{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml deleted file mode 100644 index a3509c16013..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.proxy.name" . }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.proxy.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -spec: - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml deleted file mode 100644 index 2f3d7e54c94..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "pushprox.serviceMonitor.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} -spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} - jobLabel: component - podTargetLabels: - - component - - pushprox-exporter - namespaceSelector: - matchNames: - - {{ template "pushprox.namespace" . }} - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . 
}} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -spec: - ports: - - name: metrics - port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} - protocol: TCP - targetPort: {{ .Values.metricsPort }} - selector: {{ include "pushProxy.client.labels" . | nindent 4 }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/values.yaml deleted file mode 100644 index e1bcf79a5b7..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/values.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# Default values for rancher-pushprox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Default image containing both the proxy and the client was generated from the following Dockerfile -# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 - -# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) -# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, -# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), -# you will need to set the clients / proxy nodeSelector and tolerations accordingly - -# Configuration - -global: - cattle: - systemDefaultRegistry: "" - -# The component that is being monitored (i.e. 
etcd) -component: "component" - -# The port containing the metrics that need to be scraped -metricsPort: 2739 - -# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: - enabled: true - -clients: - enabled: true - # The port which the PushProx client will post PushProx metrics to - port: 9369 - # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} - # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null - proxyUrl: "" - # If set to true, the client will forward any requests from the host IP to 127.0.0.1 - # It will only allow proxy requests to the metricsPort specified - useLocalhost: false - # Configuration for accessing metrics via HTTPS - https: - # Does the client require https to access the metrics? - enabled: false - # If set to true, the client will create a service account with adequate permissions and set a flag - # on the client to use the service account token provided by it to make authorized scrape requests - useServiceAccountCredentials: false - # If set to true, the client will disable SSL security checks - insecureSkipVerify: false - # Directory on host where necessary TLS cert and key to scrape metrics can be found - certDir: "" - # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings - certFile: "" - keyFile: "" - caCertFile: "" - - # Resource limits - resources: {} - - # Options to select all nodes to deploy client DaemonSet on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client - command: ["pushprox-client"] - - copyCertsImage: - repository: rancher/mirrored-library-busybox - tag: 1.31.1 - -proxy: - enabled: true - # The port through which PushProx clients will communicate to the proxy - port: 8080 - - # Resource limits - resources: {} - - # 
Options to select a node to run a single proxy deployment on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy - command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/README.md deleted file mode 100644 index dcecc69daef..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# rancher-pushprox - -A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. - -Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. - -Using an instance of this chart is suitable for the following scenarios: -- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) -- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) -- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` -- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) -- You need to scrape metrics without access to cacerts (i.e. 
enable `insecureSkipVerify`) - -The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. - -## Configuration - -The following tables list the configurable parameters of the rancher-pushprox chart and their default values. - -### General - -#### Required -| Parameter | Description | Example | -| ----- | ----------- | ------ | -| `component` | The component that is being monitored | `kube-etcd` -| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | - -#### Optional -| Parameter | Description | Default | -| ----- | ----------- | ------ | -| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | -| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | -| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | -| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | -| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | -| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | -| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | -| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | -| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | -| `clients.resources` | Set resource limits and requests for the client container | `{}` | -| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | -| `clients.tolerations` | Specify tolerations for clients | `[]` | -| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | -| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | -| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | -| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | -| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | - -*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. - -See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/_helpers.tpl deleted file mode 100644 index f77b8edf4f1..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/_helpers.tpl +++ /dev/null @@ -1,87 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# Windows Support - -{{/* -Windows cluster will add default taint for linux nodes, -add below linux tolerations to workloads could be scheduled to those linux nodes -*/}} - -{{- define "linux-node-tolerations" -}} -- key: "cattle.io/os" - value: "linux" - effect: "NoSchedule" - operator: "Equal" -{{- end -}} - -{{- define "linux-node-selector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: linux -{{- else -}} -kubernetes.io/os: linux -{{- end -}} -{{- end -}} - -# General - -{{- define "pushprox.namespace" -}} - {{- if .Values.namespaceOverride -}} - {{- .Values.namespaceOverride -}} - {{- else -}} - {{- .Release.Namespace -}} - {{- end -}} -{{- end -}} - -{{- define "pushProxy.commonLabels" -}} -release: {{ .Release.Name }} -component: {{ .Values.component | quote }} -provider: kubernetes -{{- end -}} - -{{- define "pushProxy.proxyUrl" -}} -{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} -{{- if .Values.clients.proxyUrl -}} -{{ printf "%s" .Values.clients.proxyUrl }} -{{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} -{{- end -}}{{- end -}} - -# Client - -{{- define "pushProxy.client.name" -}} -{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.client.labels" -}} -k8s-app: {{ template "pushProxy.client.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# Proxy - -{{- define "pushProxy.proxy.name" -}} -{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.proxy.labels" -}} -k8s-app: {{ template "pushProxy.proxy.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# ServiceMonitor - -{{- define "pushprox.serviceMonitor.name" -}} -{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.serviceMonitor.labels" -}} -app: {{ template "pushprox.serviceMonitor.name" . }} -release: {{ .Release.Name | quote }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml deleted file mode 100644 index 95346dee645..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml +++ /dev/null @@ -1,74 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.client.name" . 
}} -{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} -- nonResourceURLs: ["/metrics"] - verbs: ["get"] -{{- end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.client.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - privileged: false - hostNetwork: true - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - - 'emptyDir' - - 'hostPath' - allowedHostPaths: - - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - readOnly: true -{{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-clients.yaml deleted file mode 100644 index ed78792e5d9..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-clients.yaml +++ /dev/null @@ -1,135 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} - pushprox-exporter: "client" -spec: - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} - template: - metadata: - labels: {{ include "pushProxy.client.labels" . | nindent 8 }} - spec: - nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector | indent 8 }} -{{- end }} - tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} -{{- if .Values.clients.tolerations }} -{{ toYaml .Values.clients.tolerations | indent 8 }} -{{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ template "pushProxy.client.name" . }} - containers: - - name: pushprox-client - image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} - command: - {{- range .Values.clients.command }} - - {{ . | quote }} - {{- end }} - args: - - --fqdn=$(HOST_IP) - - --proxy-url=$(PROXY_URL) - - --metrics-addr=$(PORT) - - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} - {{- if .Values.clients.useLocalhost }} - - --use-localhost - {{- end }} - {{- if .Values.clients.https.enabled }} - {{- if .Values.clients.https.insecureSkipVerify }} - - --insecure-skip-verify - {{- end }} - {{- if .Values.clients.https.useServiceAccountCredentials }} - - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- end }} - {{- if .Values.clients.https.certDir }} - - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem - - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem - - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem - {{- end }} - {{- end }} - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PORT - value: :{{ .Values.clients.port }} - - name: PROXY_URL - value: {{ template "pushProxy.proxyUrl" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - volumeMounts: - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - {{- end }} - {{- if .Values.clients.resources }} - resources: {{ toYaml .Values.clients.resources | nindent 10 }} - {{- end }} - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - initContainers: - - name: copy-certs - image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} - command: - - sh - - -c - - | - echo "Searching for files to copy within the source volume" - echo "cert: ${CERT_FILE_NAME}" - echo "key: ${KEY_FILE_NAME}" - echo "cacert: ${CACERT_FILE_NAME}" - - CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) - KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) - CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) - - test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 - test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 - test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 - - echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" - cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 - chmod 444 $CERT_FILE_TARGET || exit 1 - - echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" - cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 - chmod 444 $KEY_FILE_TARGET || exit 1 - - echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" - cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 - chmod 444 $CACERT_FILE_TARGET || exit 1 - env: - - name: CERT_FILE_NAME - value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} - - name: KEY_FILE_NAME - value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} - - name: CACERT_FILE_NAME - value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} - - name: CERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy.pem - - name: KEY_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-key.pem - - name: CACERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem - securityContext: - 
runAsNonRoot: false - volumeMounts: - - name: metrics-cert-dir-source - mountPath: /etc/source - readOnly: true - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - volumes: - - name: metrics-cert-dir-source - hostPath: - path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - - name: metrics-cert-dir - emptyDir: {} - {{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml deleted file mode 100644 index a3509c16013..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.proxy.name" . }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.proxy.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -spec: - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml deleted file mode 100644 index 2f3d7e54c94..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "pushprox.serviceMonitor.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} -spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} - jobLabel: component - podTargetLabels: - - component - - pushprox-exporter - namespaceSelector: - matchNames: - - {{ template "pushprox.namespace" . }} - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . 
}} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -spec: - ports: - - name: metrics - port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} - protocol: TCP - targetPort: {{ .Values.metricsPort }} - selector: {{ include "pushProxy.client.labels" . | nindent 4 }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/values.yaml deleted file mode 100644 index e1bcf79a5b7..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/values.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# Default values for rancher-pushprox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Default image containing both the proxy and the client was generated from the following Dockerfile -# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 - -# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) -# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, -# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), -# you will need to set the clients / proxy nodeSelector and tolerations accordingly - -# Configuration - -global: - cattle: - systemDefaultRegistry: "" - -# The component that is being monitored (i.e. 
etcd) -component: "component" - -# The port containing the metrics that need to be scraped -metricsPort: 2739 - -# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: - enabled: true - -clients: - enabled: true - # The port which the PushProx client will post PushProx metrics to - port: 9369 - # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} - # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null - proxyUrl: "" - # If set to true, the client will forward any requests from the host IP to 127.0.0.1 - # It will only allow proxy requests to the metricsPort specified - useLocalhost: false - # Configuration for accessing metrics via HTTPS - https: - # Does the client require https to access the metrics? - enabled: false - # If set to true, the client will create a service account with adequate permissions and set a flag - # on the client to use the service account token provided by it to make authorized scrape requests - useServiceAccountCredentials: false - # If set to true, the client will disable SSL security checks - insecureSkipVerify: false - # Directory on host where necessary TLS cert and key to scrape metrics can be found - certDir: "" - # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings - certFile: "" - keyFile: "" - caCertFile: "" - - # Resource limits - resources: {} - - # Options to select all nodes to deploy client DaemonSet on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client - command: ["pushprox-client"] - - copyCertsImage: - repository: rancher/mirrored-library-busybox - tag: 1.31.1 - -proxy: - enabled: true - # The port through which PushProx clients will communicate to the proxy - port: 8080 - - # Resource limits - resources: {} - - # 
Options to select a node to run a single proxy deployment on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy - command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/README.md deleted file mode 100644 index dcecc69daef..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# rancher-pushprox - -A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. - -Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. - -Using an instance of this chart is suitable for the following scenarios: -- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) -- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) -- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` -- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) -- You need to scrape metrics without access to cacerts (i.e. 
enable `insecureSkipVerify`) - -The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. - -## Configuration - -The following tables list the configurable parameters of the rancher-pushprox chart and their default values. - -### General - -#### Required -| Parameter | Description | Example | -| ----- | ----------- | ------ | -| `component` | The component that is being monitored | `kube-etcd` -| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | - -#### Optional -| Parameter | Description | Default | -| ----- | ----------- | ------ | -| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | -| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | -| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | -| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | -| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | -| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | -| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | -| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | -| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | -| `clients.resources` | Set resource limits and requests for the client container | `{}` | -| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | -| `clients.tolerations` | Specify tolerations for clients | `[]` | -| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | -| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | -| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | -| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | -| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | - -*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. - -See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/_helpers.tpl deleted file mode 100644 index f77b8edf4f1..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/_helpers.tpl +++ /dev/null @@ -1,87 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# Windows Support - -{{/* -Windows cluster will add default taint for linux nodes, -add below linux tolerations to workloads could be scheduled to those linux nodes -*/}} - -{{- define "linux-node-tolerations" -}} -- key: "cattle.io/os" - value: "linux" - effect: "NoSchedule" - operator: "Equal" -{{- end -}} - -{{- define "linux-node-selector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: linux -{{- else -}} -kubernetes.io/os: linux -{{- end -}} -{{- end -}} - -# General - -{{- define "pushprox.namespace" -}} - {{- if .Values.namespaceOverride -}} - {{- .Values.namespaceOverride -}} - {{- else -}} - {{- .Release.Namespace -}} - {{- end -}} -{{- end -}} - -{{- define "pushProxy.commonLabels" -}} -release: {{ .Release.Name }} -component: {{ .Values.component | quote }} -provider: kubernetes -{{- end -}} - -{{- define "pushProxy.proxyUrl" -}} -{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} -{{- if .Values.clients.proxyUrl -}} -{{ printf "%s" .Values.clients.proxyUrl }} -{{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} -{{- end -}}{{- end -}} - -# Client - -{{- define "pushProxy.client.name" -}} -{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.client.labels" -}} -k8s-app: {{ template "pushProxy.client.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# Proxy - -{{- define "pushProxy.proxy.name" -}} -{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.proxy.labels" -}} -k8s-app: {{ template "pushProxy.proxy.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# ServiceMonitor - -{{- define "pushprox.serviceMonitor.name" -}} -{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.serviceMonitor.labels" -}} -app: {{ template "pushprox.serviceMonitor.name" . }} -release: {{ .Release.Name | quote }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml deleted file mode 100644 index 95346dee645..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml +++ /dev/null @@ -1,74 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.client.name" . 
}} -{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} -- nonResourceURLs: ["/metrics"] - verbs: ["get"] -{{- end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.client.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - privileged: false - hostNetwork: true - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - - 'emptyDir' - - 'hostPath' - allowedHostPaths: - - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - readOnly: true -{{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-clients.yaml deleted file mode 100644 index ed78792e5d9..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-clients.yaml +++ /dev/null @@ -1,135 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} - pushprox-exporter: "client" -spec: - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} - template: - metadata: - labels: {{ include "pushProxy.client.labels" . | nindent 8 }} - spec: - nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector | indent 8 }} -{{- end }} - tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} -{{- if .Values.clients.tolerations }} -{{ toYaml .Values.clients.tolerations | indent 8 }} -{{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ template "pushProxy.client.name" . }} - containers: - - name: pushprox-client - image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} - command: - {{- range .Values.clients.command }} - - {{ . | quote }} - {{- end }} - args: - - --fqdn=$(HOST_IP) - - --proxy-url=$(PROXY_URL) - - --metrics-addr=$(PORT) - - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} - {{- if .Values.clients.useLocalhost }} - - --use-localhost - {{- end }} - {{- if .Values.clients.https.enabled }} - {{- if .Values.clients.https.insecureSkipVerify }} - - --insecure-skip-verify - {{- end }} - {{- if .Values.clients.https.useServiceAccountCredentials }} - - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- end }} - {{- if .Values.clients.https.certDir }} - - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem - - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem - - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem - {{- end }} - {{- end }} - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PORT - value: :{{ .Values.clients.port }} - - name: PROXY_URL - value: {{ template "pushProxy.proxyUrl" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - volumeMounts: - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - {{- end }} - {{- if .Values.clients.resources }} - resources: {{ toYaml .Values.clients.resources | nindent 10 }} - {{- end }} - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - initContainers: - - name: copy-certs - image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} - command: - - sh - - -c - - | - echo "Searching for files to copy within the source volume" - echo "cert: ${CERT_FILE_NAME}" - echo "key: ${KEY_FILE_NAME}" - echo "cacert: ${CACERT_FILE_NAME}" - - CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) - KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) - CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) - - test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 - test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 - test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 - - echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" - cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 - chmod 444 $CERT_FILE_TARGET || exit 1 - - echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" - cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 - chmod 444 $KEY_FILE_TARGET || exit 1 - - echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" - cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 - chmod 444 $CACERT_FILE_TARGET || exit 1 - env: - - name: CERT_FILE_NAME - value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} - - name: KEY_FILE_NAME - value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} - - name: CACERT_FILE_NAME - value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} - - name: CERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy.pem - - name: KEY_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-key.pem - - name: CACERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem - securityContext: - 
runAsNonRoot: false - volumeMounts: - - name: metrics-cert-dir-source - mountPath: /etc/source - readOnly: true - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - volumes: - - name: metrics-cert-dir-source - hostPath: - path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - - name: metrics-cert-dir - emptyDir: {} - {{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml deleted file mode 100644 index a3509c16013..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.proxy.name" . }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.proxy.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -spec: - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml deleted file mode 100644 index 2f3d7e54c94..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "pushprox.serviceMonitor.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} -spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} - jobLabel: component - podTargetLabels: - - component - - pushprox-exporter - namespaceSelector: - matchNames: - - {{ template "pushprox.namespace" . }} - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - ports: - - name: metrics - port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} - protocol: TCP - targetPort: {{ .Values.metricsPort }} - selector: {{ include "pushProxy.client.labels" . | nindent 4 }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/values.yaml deleted file mode 100644 index e1bcf79a5b7..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/values.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# Default values for rancher-pushprox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Default image containing both the proxy and the client was generated from the following Dockerfile -# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 - -# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) -# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, -# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), -# you will need to set the clients / proxy nodeSelector and tolerations accordingly - -# Configuration - -global: - cattle: - systemDefaultRegistry: "" - -# The component that is being monitored (i.e. 
etcd) -component: "component" - -# The port containing the metrics that need to be scraped -metricsPort: 2739 - -# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: - enabled: true - -clients: - enabled: true - # The port which the PushProx client will post PushProx metrics to - port: 9369 - # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} - # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null - proxyUrl: "" - # If set to true, the client will forward any requests from the host IP to 127.0.0.1 - # It will only allow proxy requests to the metricsPort specified - useLocalhost: false - # Configuration for accessing metrics via HTTPS - https: - # Does the client require https to access the metrics? - enabled: false - # If set to true, the client will create a service account with adequate permissions and set a flag - # on the client to use the service account token provided by it to make authorized scrape requests - useServiceAccountCredentials: false - # If set to true, the client will disable SSL security checks - insecureSkipVerify: false - # Directory on host where necessary TLS cert and key to scrape metrics can be found - certDir: "" - # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings - certFile: "" - keyFile: "" - caCertFile: "" - - # Resource limits - resources: {} - - # Options to select all nodes to deploy client DaemonSet on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client - command: ["pushprox-client"] - - copyCertsImage: - repository: rancher/mirrored-library-busybox - tag: 1.31.1 - -proxy: - enabled: true - # The port through which PushProx clients will communicate to the proxy - port: 8080 - - # Resource limits - resources: {} - - # 
Options to select a node to run a single proxy deployment on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy - command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/README.md deleted file mode 100644 index dcecc69daef..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# rancher-pushprox - -A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. - -Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. - -Using an instance of this chart is suitable for the following scenarios: -- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) -- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) -- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` -- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) -- You need to scrape metrics without access to cacerts (i.e. 
enable `insecureSkipVerify`) - -The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. - -## Configuration - -The following tables list the configurable parameters of the rancher-pushprox chart and their default values. - -### General - -#### Required -| Parameter | Description | Example | -| ----- | ----------- | ------ | -| `component` | The component that is being monitored | `kube-etcd` -| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | - -#### Optional -| Parameter | Description | Default | -| ----- | ----------- | ------ | -| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | -| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | -| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | -| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | -| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | -| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | -| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | -| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | -| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | -| `clients.resources` | Set resource limits and requests for the client container | `{}` | -| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | -| `clients.tolerations` | Specify tolerations for clients | `[]` | -| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | -| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | -| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | -| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | -| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | - -*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. - -See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/_helpers.tpl deleted file mode 100644 index f77b8edf4f1..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/_helpers.tpl +++ /dev/null @@ -1,87 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# Windows Support - -{{/* -Windows cluster will add default taint for linux nodes, -add below linux tolerations to workloads could be scheduled to those linux nodes -*/}} - -{{- define "linux-node-tolerations" -}} -- key: "cattle.io/os" - value: "linux" - effect: "NoSchedule" - operator: "Equal" -{{- end -}} - -{{- define "linux-node-selector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: linux -{{- else -}} -kubernetes.io/os: linux -{{- end -}} -{{- end -}} - -# General - -{{- define "pushprox.namespace" -}} - {{- if .Values.namespaceOverride -}} - {{- .Values.namespaceOverride -}} - {{- else -}} - {{- .Release.Namespace -}} - {{- end -}} -{{- end -}} - -{{- define "pushProxy.commonLabels" -}} -release: {{ .Release.Name }} -component: {{ .Values.component | quote }} -provider: kubernetes -{{- end -}} - -{{- define "pushProxy.proxyUrl" -}} -{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} -{{- if .Values.clients.proxyUrl -}} -{{ printf "%s" .Values.clients.proxyUrl }} -{{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} -{{- end -}}{{- end -}} - -# Client - -{{- define "pushProxy.client.name" -}} -{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.client.labels" -}} -k8s-app: {{ template "pushProxy.client.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# Proxy - -{{- define "pushProxy.proxy.name" -}} -{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.proxy.labels" -}} -k8s-app: {{ template "pushProxy.proxy.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# ServiceMonitor - -{{- define "pushprox.serviceMonitor.name" -}} -{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.serviceMonitor.labels" -}} -app: {{ template "pushprox.serviceMonitor.name" . }} -release: {{ .Release.Name | quote }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml deleted file mode 100644 index 95346dee645..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml +++ /dev/null @@ -1,74 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.client.name" . 
}} -{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} -- nonResourceURLs: ["/metrics"] - verbs: ["get"] -{{- end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.client.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - privileged: false - hostNetwork: true - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - - 'emptyDir' - - 'hostPath' - allowedHostPaths: - - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - readOnly: true -{{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-clients.yaml deleted file mode 100644 index ed78792e5d9..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-clients.yaml +++ /dev/null @@ -1,135 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} - pushprox-exporter: "client" -spec: - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} - template: - metadata: - labels: {{ include "pushProxy.client.labels" . | nindent 8 }} - spec: - nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector | indent 8 }} -{{- end }} - tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} -{{- if .Values.clients.tolerations }} -{{ toYaml .Values.clients.tolerations | indent 8 }} -{{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ template "pushProxy.client.name" . }} - containers: - - name: pushprox-client - image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} - command: - {{- range .Values.clients.command }} - - {{ . | quote }} - {{- end }} - args: - - --fqdn=$(HOST_IP) - - --proxy-url=$(PROXY_URL) - - --metrics-addr=$(PORT) - - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} - {{- if .Values.clients.useLocalhost }} - - --use-localhost - {{- end }} - {{- if .Values.clients.https.enabled }} - {{- if .Values.clients.https.insecureSkipVerify }} - - --insecure-skip-verify - {{- end }} - {{- if .Values.clients.https.useServiceAccountCredentials }} - - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- end }} - {{- if .Values.clients.https.certDir }} - - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem - - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem - - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem - {{- end }} - {{- end }} - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PORT - value: :{{ .Values.clients.port }} - - name: PROXY_URL - value: {{ template "pushProxy.proxyUrl" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - volumeMounts: - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - {{- end }} - {{- if .Values.clients.resources }} - resources: {{ toYaml .Values.clients.resources | nindent 10 }} - {{- end }} - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - initContainers: - - name: copy-certs - image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} - command: - - sh - - -c - - | - echo "Searching for files to copy within the source volume" - echo "cert: ${CERT_FILE_NAME}" - echo "key: ${KEY_FILE_NAME}" - echo "cacert: ${CACERT_FILE_NAME}" - - CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) - KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) - CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) - - test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 - test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 - test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 - - echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" - cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 - chmod 444 $CERT_FILE_TARGET || exit 1 - - echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" - cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 - chmod 444 $KEY_FILE_TARGET || exit 1 - - echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" - cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 - chmod 444 $CACERT_FILE_TARGET || exit 1 - env: - - name: CERT_FILE_NAME - value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} - - name: KEY_FILE_NAME - value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} - - name: CACERT_FILE_NAME - value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} - - name: CERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy.pem - - name: KEY_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-key.pem - - name: CACERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem - securityContext: - 
runAsNonRoot: false - volumeMounts: - - name: metrics-cert-dir-source - mountPath: /etc/source - readOnly: true - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - volumes: - - name: metrics-cert-dir-source - hostPath: - path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - - name: metrics-cert-dir - emptyDir: {} - {{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml deleted file mode 100644 index a3509c16013..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.proxy.name" . }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.proxy.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -spec: - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml deleted file mode 100644 index 2f3d7e54c94..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "pushprox.serviceMonitor.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} -spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} - jobLabel: component - podTargetLabels: - - component - - pushprox-exporter - namespaceSelector: - matchNames: - - {{ template "pushprox.namespace" . }} - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - ports: - - name: metrics - port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} - protocol: TCP - targetPort: {{ .Values.metricsPort }} - selector: {{ include "pushProxy.client.labels" . | nindent 4 }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/values.yaml deleted file mode 100644 index e1bcf79a5b7..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/values.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# Default values for rancher-pushprox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Default image containing both the proxy and the client was generated from the following Dockerfile -# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 - -# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) -# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, -# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), -# you will need to set the clients / proxy nodeSelector and tolerations accordingly - -# Configuration - -global: - cattle: - systemDefaultRegistry: "" - -# The component that is being monitored (i.e. 
etcd) -component: "component" - -# The port containing the metrics that need to be scraped -metricsPort: 2739 - -# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: - enabled: true - -clients: - enabled: true - # The port which the PushProx client will post PushProx metrics to - port: 9369 - # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} - # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null - proxyUrl: "" - # If set to true, the client will forward any requests from the host IP to 127.0.0.1 - # It will only allow proxy requests to the metricsPort specified - useLocalhost: false - # Configuration for accessing metrics via HTTPS - https: - # Does the client require https to access the metrics? - enabled: false - # If set to true, the client will create a service account with adequate permissions and set a flag - # on the client to use the service account token provided by it to make authorized scrape requests - useServiceAccountCredentials: false - # If set to true, the client will disable SSL security checks - insecureSkipVerify: false - # Directory on host where necessary TLS cert and key to scrape metrics can be found - certDir: "" - # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings - certFile: "" - keyFile: "" - caCertFile: "" - - # Resource limits - resources: {} - - # Options to select all nodes to deploy client DaemonSet on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client - command: ["pushprox-client"] - - copyCertsImage: - repository: rancher/mirrored-library-busybox - tag: 1.31.1 - -proxy: - enabled: true - # The port through which PushProx clients will communicate to the proxy - port: 8080 - - # Resource limits - resources: {} - - # 
Options to select a node to run a single proxy deployment on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy - command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/README.md deleted file mode 100644 index dcecc69daef..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# rancher-pushprox - -A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. - -Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. - -Using an instance of this chart is suitable for the following scenarios: -- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) -- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) -- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` -- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) -- You need to scrape metrics without access to cacerts (i.e. 
enable `insecureSkipVerify`) - -The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. - -## Configuration - -The following tables list the configurable parameters of the rancher-pushprox chart and their default values. - -### General - -#### Required -| Parameter | Description | Example | -| ----- | ----------- | ------ | -| `component` | The component that is being monitored | `kube-etcd` -| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | - -#### Optional -| Parameter | Description | Default | -| ----- | ----------- | ------ | -| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | -| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | -| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | -| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | -| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | -| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | -| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | -| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | -| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | -| `clients.resources` | Set resource limits and requests for the client container | `{}` | -| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | -| `clients.tolerations` | Specify tolerations for clients | `[]` | -| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | -| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | -| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | -| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | -| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | - -*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. - -See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/_helpers.tpl deleted file mode 100644 index f77b8edf4f1..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/_helpers.tpl +++ /dev/null @@ -1,87 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# Windows Support - -{{/* -Windows cluster will add default taint for linux nodes, -add below linux tolerations to workloads could be scheduled to those linux nodes -*/}} - -{{- define "linux-node-tolerations" -}} -- key: "cattle.io/os" - value: "linux" - effect: "NoSchedule" - operator: "Equal" -{{- end -}} - -{{- define "linux-node-selector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: linux -{{- else -}} -kubernetes.io/os: linux -{{- end -}} -{{- end -}} - -# General - -{{- define "pushprox.namespace" -}} - {{- if .Values.namespaceOverride -}} - {{- .Values.namespaceOverride -}} - {{- else -}} - {{- .Release.Namespace -}} - {{- end -}} -{{- end -}} - -{{- define "pushProxy.commonLabels" -}} -release: {{ .Release.Name }} -component: {{ .Values.component | quote }} -provider: kubernetes -{{- end -}} - -{{- define "pushProxy.proxyUrl" -}} -{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} -{{- if .Values.clients.proxyUrl -}} -{{ printf "%s" .Values.clients.proxyUrl }} -{{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} -{{- end -}}{{- end -}} - -# Client - -{{- define "pushProxy.client.name" -}} -{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.client.labels" -}} -k8s-app: {{ template "pushProxy.client.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# Proxy - -{{- define "pushProxy.proxy.name" -}} -{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.proxy.labels" -}} -k8s-app: {{ template "pushProxy.proxy.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# ServiceMonitor - -{{- define "pushprox.serviceMonitor.name" -}} -{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.serviceMonitor.labels" -}} -app: {{ template "pushprox.serviceMonitor.name" . }} -release: {{ .Release.Name | quote }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml deleted file mode 100644 index 95346dee645..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml +++ /dev/null @@ -1,74 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.client.name" . 
}} -{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} -- nonResourceURLs: ["/metrics"] - verbs: ["get"] -{{- end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.client.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - privileged: false - hostNetwork: true - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - - 'emptyDir' - - 'hostPath' - allowedHostPaths: - - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - readOnly: true -{{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-clients.yaml deleted file mode 100644 index ed78792e5d9..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-clients.yaml +++ /dev/null @@ -1,135 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} - pushprox-exporter: "client" -spec: - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} - template: - metadata: - labels: {{ include "pushProxy.client.labels" . | nindent 8 }} - spec: - nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector | indent 8 }} -{{- end }} - tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} -{{- if .Values.clients.tolerations }} -{{ toYaml .Values.clients.tolerations | indent 8 }} -{{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ template "pushProxy.client.name" . }} - containers: - - name: pushprox-client - image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} - command: - {{- range .Values.clients.command }} - - {{ . | quote }} - {{- end }} - args: - - --fqdn=$(HOST_IP) - - --proxy-url=$(PROXY_URL) - - --metrics-addr=$(PORT) - - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} - {{- if .Values.clients.useLocalhost }} - - --use-localhost - {{- end }} - {{- if .Values.clients.https.enabled }} - {{- if .Values.clients.https.insecureSkipVerify }} - - --insecure-skip-verify - {{- end }} - {{- if .Values.clients.https.useServiceAccountCredentials }} - - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- end }} - {{- if .Values.clients.https.certDir }} - - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem - - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem - - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem - {{- end }} - {{- end }} - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PORT - value: :{{ .Values.clients.port }} - - name: PROXY_URL - value: {{ template "pushProxy.proxyUrl" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - volumeMounts: - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - {{- end }} - {{- if .Values.clients.resources }} - resources: {{ toYaml .Values.clients.resources | nindent 10 }} - {{- end }} - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - initContainers: - - name: copy-certs - image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} - command: - - sh - - -c - - | - echo "Searching for files to copy within the source volume" - echo "cert: ${CERT_FILE_NAME}" - echo "key: ${KEY_FILE_NAME}" - echo "cacert: ${CACERT_FILE_NAME}" - - CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) - KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) - CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) - - test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 - test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 - test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 - - echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" - cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 - chmod 444 $CERT_FILE_TARGET || exit 1 - - echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" - cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 - chmod 444 $KEY_FILE_TARGET || exit 1 - - echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" - cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 - chmod 444 $CACERT_FILE_TARGET || exit 1 - env: - - name: CERT_FILE_NAME - value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} - - name: KEY_FILE_NAME - value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} - - name: CACERT_FILE_NAME - value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} - - name: CERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy.pem - - name: KEY_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-key.pem - - name: CACERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem - securityContext: - 
runAsNonRoot: false - volumeMounts: - - name: metrics-cert-dir-source - mountPath: /etc/source - readOnly: true - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - volumes: - - name: metrics-cert-dir-source - hostPath: - path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - - name: metrics-cert-dir - emptyDir: {} - {{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml deleted file mode 100644 index a3509c16013..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.proxy.name" . }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.proxy.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -spec: - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml deleted file mode 100644 index 2f3d7e54c94..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "pushprox.serviceMonitor.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} -spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} - jobLabel: component - podTargetLabels: - - component - - pushprox-exporter - namespaceSelector: - matchNames: - - {{ template "pushprox.namespace" . }} - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . 
}} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -spec: - ports: - - name: metrics - port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} - protocol: TCP - targetPort: {{ .Values.metricsPort }} - selector: {{ include "pushProxy.client.labels" . | nindent 4 }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/values.yaml deleted file mode 100644 index e1bcf79a5b7..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/values.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# Default values for rancher-pushprox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Default image containing both the proxy and the client was generated from the following Dockerfile -# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 - -# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) -# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, -# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), -# you will need to set the clients / proxy nodeSelector and tolerations accordingly - -# Configuration - -global: - cattle: - systemDefaultRegistry: "" - -# The component that is being monitored (i.e. 
etcd) -component: "component" - -# The port containing the metrics that need to be scraped -metricsPort: 2739 - -# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: - enabled: true - -clients: - enabled: true - # The port which the PushProx client will post PushProx metrics to - port: 9369 - # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} - # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null - proxyUrl: "" - # If set to true, the client will forward any requests from the host IP to 127.0.0.1 - # It will only allow proxy requests to the metricsPort specified - useLocalhost: false - # Configuration for accessing metrics via HTTPS - https: - # Does the client require https to access the metrics? - enabled: false - # If set to true, the client will create a service account with adequate permissions and set a flag - # on the client to use the service account token provided by it to make authorized scrape requests - useServiceAccountCredentials: false - # If set to true, the client will disable SSL security checks - insecureSkipVerify: false - # Directory on host where necessary TLS cert and key to scrape metrics can be found - certDir: "" - # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings - certFile: "" - keyFile: "" - caCertFile: "" - - # Resource limits - resources: {} - - # Options to select all nodes to deploy client DaemonSet on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client - command: ["pushprox-client"] - - copyCertsImage: - repository: rancher/mirrored-library-busybox - tag: 1.31.1 - -proxy: - enabled: true - # The port through which PushProx clients will communicate to the proxy - port: 8080 - - # Resource limits - resources: {} - - # 
Options to select a node to run a single proxy deployment on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy - command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/README.md deleted file mode 100644 index dcecc69daef..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# rancher-pushprox - -A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. - -Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. - -Using an instance of this chart is suitable for the following scenarios: -- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) -- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) -- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` -- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) -- You need to scrape metrics without access to cacerts (i.e. 
enable `insecureSkipVerify`) - -The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. - -## Configuration - -The following tables list the configurable parameters of the rancher-pushprox chart and their default values. - -### General - -#### Required -| Parameter | Description | Example | -| ----- | ----------- | ------ | -| `component` | The component that is being monitored | `kube-etcd` -| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | - -#### Optional -| Parameter | Description | Default | -| ----- | ----------- | ------ | -| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | -| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | -| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | -| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | -| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | -| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | -| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | -| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | -| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | -| `clients.resources` | Set resource limits and requests for the client container | `{}` | -| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | -| `clients.tolerations` | Specify tolerations for clients | `[]` | -| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | -| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | -| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | -| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | -| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | - -*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. - -See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/_helpers.tpl deleted file mode 100644 index f77b8edf4f1..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/_helpers.tpl +++ /dev/null @@ -1,87 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# Windows Support - -{{/* -Windows cluster will add default taint for linux nodes, -add below linux tolerations to workloads could be scheduled to those linux nodes -*/}} - -{{- define "linux-node-tolerations" -}} -- key: "cattle.io/os" - value: "linux" - effect: "NoSchedule" - operator: "Equal" -{{- end -}} - -{{- define "linux-node-selector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: linux -{{- else -}} -kubernetes.io/os: linux -{{- end -}} -{{- end -}} - -# General - -{{- define "pushprox.namespace" -}} - {{- if .Values.namespaceOverride -}} - {{- .Values.namespaceOverride -}} - {{- else -}} - {{- .Release.Namespace -}} - {{- end -}} -{{- end -}} - -{{- define "pushProxy.commonLabels" -}} -release: {{ .Release.Name }} -component: {{ .Values.component | quote }} -provider: kubernetes -{{- end -}} - -{{- define "pushProxy.proxyUrl" -}} -{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} -{{- if .Values.clients.proxyUrl -}} -{{ printf "%s" .Values.clients.proxyUrl }} -{{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} -{{- end -}}{{- end -}} - -# Client - -{{- define "pushProxy.client.name" -}} -{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.client.labels" -}} -k8s-app: {{ template "pushProxy.client.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# Proxy - -{{- define "pushProxy.proxy.name" -}} -{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.proxy.labels" -}} -k8s-app: {{ template "pushProxy.proxy.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# ServiceMonitor - -{{- define "pushprox.serviceMonitor.name" -}} -{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.serviceMonitor.labels" -}} -app: {{ template "pushprox.serviceMonitor.name" . }} -release: {{ .Release.Name | quote }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml deleted file mode 100644 index 95346dee645..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml +++ /dev/null @@ -1,74 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.client.name" . 
}} -{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} -- nonResourceURLs: ["/metrics"] - verbs: ["get"] -{{- end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.client.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - privileged: false - hostNetwork: true - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - - 'emptyDir' - - 'hostPath' - allowedHostPaths: - - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - readOnly: true -{{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-clients.yaml deleted file mode 100644 index ed78792e5d9..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-clients.yaml +++ /dev/null @@ -1,135 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} - pushprox-exporter: "client" -spec: - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} - template: - metadata: - labels: {{ include "pushProxy.client.labels" . | nindent 8 }} - spec: - nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector | indent 8 }} -{{- end }} - tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} -{{- if .Values.clients.tolerations }} -{{ toYaml .Values.clients.tolerations | indent 8 }} -{{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ template "pushProxy.client.name" . }} - containers: - - name: pushprox-client - image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} - command: - {{- range .Values.clients.command }} - - {{ . | quote }} - {{- end }} - args: - - --fqdn=$(HOST_IP) - - --proxy-url=$(PROXY_URL) - - --metrics-addr=$(PORT) - - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} - {{- if .Values.clients.useLocalhost }} - - --use-localhost - {{- end }} - {{- if .Values.clients.https.enabled }} - {{- if .Values.clients.https.insecureSkipVerify }} - - --insecure-skip-verify - {{- end }} - {{- if .Values.clients.https.useServiceAccountCredentials }} - - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- end }} - {{- if .Values.clients.https.certDir }} - - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem - - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem - - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem - {{- end }} - {{- end }} - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PORT - value: :{{ .Values.clients.port }} - - name: PROXY_URL - value: {{ template "pushProxy.proxyUrl" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - volumeMounts: - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - {{- end }} - {{- if .Values.clients.resources }} - resources: {{ toYaml .Values.clients.resources | nindent 10 }} - {{- end }} - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - initContainers: - - name: copy-certs - image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} - command: - - sh - - -c - - | - echo "Searching for files to copy within the source volume" - echo "cert: ${CERT_FILE_NAME}" - echo "key: ${KEY_FILE_NAME}" - echo "cacert: ${CACERT_FILE_NAME}" - - CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) - KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) - CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) - - test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 - test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 - test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 - - echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" - cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 - chmod 444 $CERT_FILE_TARGET || exit 1 - - echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" - cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 - chmod 444 $KEY_FILE_TARGET || exit 1 - - echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" - cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 - chmod 444 $CACERT_FILE_TARGET || exit 1 - env: - - name: CERT_FILE_NAME - value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} - - name: KEY_FILE_NAME - value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} - - name: CACERT_FILE_NAME - value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} - - name: CERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy.pem - - name: KEY_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-key.pem - - name: CACERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem - securityContext: - 
runAsNonRoot: false - volumeMounts: - - name: metrics-cert-dir-source - mountPath: /etc/source - readOnly: true - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - volumes: - - name: metrics-cert-dir-source - hostPath: - path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - - name: metrics-cert-dir - emptyDir: {} - {{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml deleted file mode 100644 index a3509c16013..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.proxy.name" . }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.proxy.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -spec: - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml deleted file mode 100644 index 2f3d7e54c94..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "pushprox.serviceMonitor.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} -spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} - jobLabel: component - podTargetLabels: - - component - - pushprox-exporter - namespaceSelector: - matchNames: - - {{ template "pushprox.namespace" . }} - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . 
}} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -spec: - ports: - - name: metrics - port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} - protocol: TCP - targetPort: {{ .Values.metricsPort }} - selector: {{ include "pushProxy.client.labels" . | nindent 4 }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/values.yaml deleted file mode 100644 index e1bcf79a5b7..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/values.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# Default values for rancher-pushprox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Default image containing both the proxy and the client was generated from the following Dockerfile -# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 - -# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) -# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, -# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), -# you will need to set the clients / proxy nodeSelector and tolerations accordingly - -# Configuration - -global: - cattle: - systemDefaultRegistry: "" - -# The component that is being monitored (i.e. 
etcd) -component: "component" - -# The port containing the metrics that need to be scraped -metricsPort: 2739 - -# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: - enabled: true - -clients: - enabled: true - # The port which the PushProx client will post PushProx metrics to - port: 9369 - # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} - # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null - proxyUrl: "" - # If set to true, the client will forward any requests from the host IP to 127.0.0.1 - # It will only allow proxy requests to the metricsPort specified - useLocalhost: false - # Configuration for accessing metrics via HTTPS - https: - # Does the client require https to access the metrics? - enabled: false - # If set to true, the client will create a service account with adequate permissions and set a flag - # on the client to use the service account token provided by it to make authorized scrape requests - useServiceAccountCredentials: false - # If set to true, the client will disable SSL security checks - insecureSkipVerify: false - # Directory on host where necessary TLS cert and key to scrape metrics can be found - certDir: "" - # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings - certFile: "" - keyFile: "" - caCertFile: "" - - # Resource limits - resources: {} - - # Options to select all nodes to deploy client DaemonSet on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client - command: ["pushprox-client"] - - copyCertsImage: - repository: rancher/mirrored-library-busybox - tag: 1.31.1 - -proxy: - enabled: true - # The port through which PushProx clients will communicate to the proxy - port: 8080 - - # Resource limits - resources: {} - - # 
Options to select a node to run a single proxy deployment on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy - command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/README.md deleted file mode 100644 index dcecc69daef..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# rancher-pushprox - -A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. - -Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. - -Using an instance of this chart is suitable for the following scenarios: -- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) -- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) -- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` -- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) -- You need to scrape metrics without access to cacerts (i.e. 
enable `insecureSkipVerify`) - -The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. - -## Configuration - -The following tables list the configurable parameters of the rancher-pushprox chart and their default values. - -### General - -#### Required -| Parameter | Description | Example | -| ----- | ----------- | ------ | -| `component` | The component that is being monitored | `kube-etcd` -| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | - -#### Optional -| Parameter | Description | Default | -| ----- | ----------- | ------ | -| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | -| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | -| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | -| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | -| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | -| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | -| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | -| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | -| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | -| `clients.resources` | Set resource limits and requests for the client container | `{}` | -| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | -| `clients.tolerations` | Specify tolerations for clients | `[]` | -| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | -| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | -| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | -| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | -| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | - -*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. - -See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/_helpers.tpl deleted file mode 100644 index f77b8edf4f1..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/_helpers.tpl +++ /dev/null @@ -1,87 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# Windows Support - -{{/* -Windows cluster will add default taint for linux nodes, -add below linux tolerations to workloads could be scheduled to those linux nodes -*/}} - -{{- define "linux-node-tolerations" -}} -- key: "cattle.io/os" - value: "linux" - effect: "NoSchedule" - operator: "Equal" -{{- end -}} - -{{- define "linux-node-selector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: linux -{{- else -}} -kubernetes.io/os: linux -{{- end -}} -{{- end -}} - -# General - -{{- define "pushprox.namespace" -}} - {{- if .Values.namespaceOverride -}} - {{- .Values.namespaceOverride -}} - {{- else -}} - {{- .Release.Namespace -}} - {{- end -}} -{{- end -}} - -{{- define "pushProxy.commonLabels" -}} -release: {{ .Release.Name }} -component: {{ .Values.component | quote }} -provider: kubernetes -{{- end -}} - -{{- define "pushProxy.proxyUrl" -}} -{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} -{{- if .Values.clients.proxyUrl -}} -{{ printf "%s" .Values.clients.proxyUrl }} -{{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} -{{- end -}}{{- end -}} - -# Client - -{{- define "pushProxy.client.name" -}} -{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.client.labels" -}} -k8s-app: {{ template "pushProxy.client.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# Proxy - -{{- define "pushProxy.proxy.name" -}} -{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.proxy.labels" -}} -k8s-app: {{ template "pushProxy.proxy.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# ServiceMonitor - -{{- define "pushprox.serviceMonitor.name" -}} -{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.serviceMonitor.labels" -}} -app: {{ template "pushprox.serviceMonitor.name" . }} -release: {{ .Release.Name | quote }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml deleted file mode 100644 index 95346dee645..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml +++ /dev/null @@ -1,74 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.client.name" . 
}} -{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} -- nonResourceURLs: ["/metrics"] - verbs: ["get"] -{{- end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.client.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - privileged: false - hostNetwork: true - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - - 'emptyDir' - - 'hostPath' - allowedHostPaths: - - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - readOnly: true -{{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-clients.yaml deleted file mode 100644 index ed78792e5d9..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-clients.yaml +++ /dev/null @@ -1,135 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} - pushprox-exporter: "client" -spec: - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} - template: - metadata: - labels: {{ include "pushProxy.client.labels" . | nindent 8 }} - spec: - nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector | indent 8 }} -{{- end }} - tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} -{{- if .Values.clients.tolerations }} -{{ toYaml .Values.clients.tolerations | indent 8 }} -{{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ template "pushProxy.client.name" . }} - containers: - - name: pushprox-client - image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} - command: - {{- range .Values.clients.command }} - - {{ . | quote }} - {{- end }} - args: - - --fqdn=$(HOST_IP) - - --proxy-url=$(PROXY_URL) - - --metrics-addr=$(PORT) - - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} - {{- if .Values.clients.useLocalhost }} - - --use-localhost - {{- end }} - {{- if .Values.clients.https.enabled }} - {{- if .Values.clients.https.insecureSkipVerify }} - - --insecure-skip-verify - {{- end }} - {{- if .Values.clients.https.useServiceAccountCredentials }} - - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- end }} - {{- if .Values.clients.https.certDir }} - - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem - - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem - - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem - {{- end }} - {{- end }} - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PORT - value: :{{ .Values.clients.port }} - - name: PROXY_URL - value: {{ template "pushProxy.proxyUrl" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - volumeMounts: - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - {{- end }} - {{- if .Values.clients.resources }} - resources: {{ toYaml .Values.clients.resources | nindent 10 }} - {{- end }} - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - initContainers: - - name: copy-certs - image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} - command: - - sh - - -c - - | - echo "Searching for files to copy within the source volume" - echo "cert: ${CERT_FILE_NAME}" - echo "key: ${KEY_FILE_NAME}" - echo "cacert: ${CACERT_FILE_NAME}" - - CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) - KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) - CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) - - test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 - test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 - test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 - - echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" - cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 - chmod 444 $CERT_FILE_TARGET || exit 1 - - echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" - cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 - chmod 444 $KEY_FILE_TARGET || exit 1 - - echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" - cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 - chmod 444 $CACERT_FILE_TARGET || exit 1 - env: - - name: CERT_FILE_NAME - value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} - - name: KEY_FILE_NAME - value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} - - name: CACERT_FILE_NAME - value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} - - name: CERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy.pem - - name: KEY_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-key.pem - - name: CACERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem - securityContext: - 
runAsNonRoot: false - volumeMounts: - - name: metrics-cert-dir-source - mountPath: /etc/source - readOnly: true - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - volumes: - - name: metrics-cert-dir-source - hostPath: - path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - - name: metrics-cert-dir - emptyDir: {} - {{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml deleted file mode 100644 index a3509c16013..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.proxy.name" . }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.proxy.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -spec: - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml deleted file mode 100644 index 2f3d7e54c94..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "pushprox.serviceMonitor.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} -spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} - jobLabel: component - podTargetLabels: - - component - - pushprox-exporter - namespaceSelector: - matchNames: - - {{ template "pushprox.namespace" . }} - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - ports: - - name: metrics - port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} - protocol: TCP - targetPort: {{ .Values.metricsPort }} - selector: {{ include "pushProxy.client.labels" . | nindent 4 }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/values.yaml deleted file mode 100644 index e1bcf79a5b7..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/values.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# Default values for rancher-pushprox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Default image containing both the proxy and the client was generated from the following Dockerfile -# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 - -# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) -# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, -# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), -# you will need to set the clients / proxy nodeSelector and tolerations accordingly - -# Configuration - -global: - cattle: - systemDefaultRegistry: "" - -# The component that is being monitored (i.e. 
etcd) -component: "component" - -# The port containing the metrics that need to be scraped -metricsPort: 2739 - -# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: - enabled: true - -clients: - enabled: true - # The port which the PushProx client will post PushProx metrics to - port: 9369 - # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} - # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null - proxyUrl: "" - # If set to true, the client will forward any requests from the host IP to 127.0.0.1 - # It will only allow proxy requests to the metricsPort specified - useLocalhost: false - # Configuration for accessing metrics via HTTPS - https: - # Does the client require https to access the metrics? - enabled: false - # If set to true, the client will create a service account with adequate permissions and set a flag - # on the client to use the service account token provided by it to make authorized scrape requests - useServiceAccountCredentials: false - # If set to true, the client will disable SSL security checks - insecureSkipVerify: false - # Directory on host where necessary TLS cert and key to scrape metrics can be found - certDir: "" - # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings - certFile: "" - keyFile: "" - caCertFile: "" - - # Resource limits - resources: {} - - # Options to select all nodes to deploy client DaemonSet on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client - command: ["pushprox-client"] - - copyCertsImage: - repository: rancher/mirrored-library-busybox - tag: 1.31.1 - -proxy: - enabled: true - # The port through which PushProx clients will communicate to the proxy - port: 8080 - - # Resource limits - resources: {} - - # 
Options to select a node to run a single proxy deployment on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy - command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/README.md deleted file mode 100644 index dcecc69daef..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# rancher-pushprox - -A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. - -Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. - -Using an instance of this chart is suitable for the following scenarios: -- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) -- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) -- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` -- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) -- You need to scrape metrics without access to cacerts (i.e. 
enable `insecureSkipVerify`) - -The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. - -## Configuration - -The following tables list the configurable parameters of the rancher-pushprox chart and their default values. - -### General - -#### Required -| Parameter | Description | Example | -| ----- | ----------- | ------ | -| `component` | The component that is being monitored | `kube-etcd` -| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | - -#### Optional -| Parameter | Description | Default | -| ----- | ----------- | ------ | -| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | -| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | -| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | -| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | -| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | -| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | -| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | -| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | -| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | -| `clients.resources` | Set resource limits and requests for the client container | `{}` | -| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | -| `clients.tolerations` | Specify tolerations for clients | `[]` | -| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | -| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | -| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | -| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | -| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | - -*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. - -See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/_helpers.tpl deleted file mode 100644 index f77b8edf4f1..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/_helpers.tpl +++ /dev/null @@ -1,87 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# Windows Support - -{{/* -Windows cluster will add default taint for linux nodes, -add below linux tolerations to workloads could be scheduled to those linux nodes -*/}} - -{{- define "linux-node-tolerations" -}} -- key: "cattle.io/os" - value: "linux" - effect: "NoSchedule" - operator: "Equal" -{{- end -}} - -{{- define "linux-node-selector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: linux -{{- else -}} -kubernetes.io/os: linux -{{- end -}} -{{- end -}} - -# General - -{{- define "pushprox.namespace" -}} - {{- if .Values.namespaceOverride -}} - {{- .Values.namespaceOverride -}} - {{- else -}} - {{- .Release.Namespace -}} - {{- end -}} -{{- end -}} - -{{- define "pushProxy.commonLabels" -}} -release: {{ .Release.Name }} -component: {{ .Values.component | quote }} -provider: kubernetes -{{- end -}} - -{{- define "pushProxy.proxyUrl" -}} -{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} -{{- if .Values.clients.proxyUrl -}} -{{ printf "%s" .Values.clients.proxyUrl }} -{{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} -{{- end -}}{{- end -}} - -# Client - -{{- define "pushProxy.client.name" -}} -{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.client.labels" -}} -k8s-app: {{ template "pushProxy.client.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# Proxy - -{{- define "pushProxy.proxy.name" -}} -{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.proxy.labels" -}} -k8s-app: {{ template "pushProxy.proxy.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# ServiceMonitor - -{{- define "pushprox.serviceMonitor.name" -}} -{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.serviceMonitor.labels" -}} -app: {{ template "pushprox.serviceMonitor.name" . }} -release: {{ .Release.Name | quote }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-clients-rbac.yaml deleted file mode 100644 index 95346dee645..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-clients-rbac.yaml +++ /dev/null @@ -1,74 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.client.name" . 
}} -{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} -- nonResourceURLs: ["/metrics"] - verbs: ["get"] -{{- end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.client.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - privileged: false - hostNetwork: true - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - - 'emptyDir' - - 'hostPath' - allowedHostPaths: - - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - readOnly: true -{{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-clients.yaml deleted file mode 100644 index ed78792e5d9..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-clients.yaml +++ /dev/null @@ -1,135 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} - pushprox-exporter: "client" -spec: - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} - template: - metadata: - labels: {{ include "pushProxy.client.labels" . | nindent 8 }} - spec: - nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector | indent 8 }} -{{- end }} - tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} -{{- if .Values.clients.tolerations }} -{{ toYaml .Values.clients.tolerations | indent 8 }} -{{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ template "pushProxy.client.name" . }} - containers: - - name: pushprox-client - image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} - command: - {{- range .Values.clients.command }} - - {{ . | quote }} - {{- end }} - args: - - --fqdn=$(HOST_IP) - - --proxy-url=$(PROXY_URL) - - --metrics-addr=$(PORT) - - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} - {{- if .Values.clients.useLocalhost }} - - --use-localhost - {{- end }} - {{- if .Values.clients.https.enabled }} - {{- if .Values.clients.https.insecureSkipVerify }} - - --insecure-skip-verify - {{- end }} - {{- if .Values.clients.https.useServiceAccountCredentials }} - - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- end }} - {{- if .Values.clients.https.certDir }} - - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem - - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem - - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem - {{- end }} - {{- end }} - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PORT - value: :{{ .Values.clients.port }} - - name: PROXY_URL - value: {{ template "pushProxy.proxyUrl" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - volumeMounts: - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - {{- end }} - {{- if .Values.clients.resources }} - resources: {{ toYaml .Values.clients.resources | nindent 10 }} - {{- end }} - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - initContainers: - - name: copy-certs - image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} - command: - - sh - - -c - - | - echo "Searching for files to copy within the source volume" - echo "cert: ${CERT_FILE_NAME}" - echo "key: ${KEY_FILE_NAME}" - echo "cacert: ${CACERT_FILE_NAME}" - - CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) - KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) - CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) - - test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 - test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 - test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 - - echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" - cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 - chmod 444 $CERT_FILE_TARGET || exit 1 - - echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" - cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 - chmod 444 $KEY_FILE_TARGET || exit 1 - - echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" - cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 - chmod 444 $CACERT_FILE_TARGET || exit 1 - env: - - name: CERT_FILE_NAME - value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} - - name: KEY_FILE_NAME - value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} - - name: CACERT_FILE_NAME - value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} - - name: CERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy.pem - - name: KEY_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-key.pem - - name: CACERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem - securityContext: - 
runAsNonRoot: false - volumeMounts: - - name: metrics-cert-dir-source - mountPath: /etc/source - readOnly: true - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - volumes: - - name: metrics-cert-dir-source - hostPath: - path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - - name: metrics-cert-dir - emptyDir: {} - {{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml deleted file mode 100644 index a3509c16013..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.proxy.name" . }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.proxy.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -spec: - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-servicemonitor.yaml deleted file mode 100644 index 2f3d7e54c94..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-servicemonitor.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "pushprox.serviceMonitor.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} -spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} - jobLabel: component - podTargetLabels: - - component - - pushprox-exporter - namespaceSelector: - matchNames: - - {{ template "pushprox.namespace" . }} - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - ports: - - name: metrics - port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} - protocol: TCP - targetPort: {{ .Values.metricsPort }} - selector: {{ include "pushProxy.client.labels" . | nindent 4 }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/values.yaml deleted file mode 100644 index e1bcf79a5b7..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/values.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# Default values for rancher-pushprox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Default image containing both the proxy and the client was generated from the following Dockerfile -# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 - -# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) -# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, -# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), -# you will need to set the clients / proxy nodeSelector and tolerations accordingly - -# Configuration - -global: - cattle: - systemDefaultRegistry: "" - -# The component that is being monitored (i.e. 
etcd) -component: "component" - -# The port containing the metrics that need to be scraped -metricsPort: 2739 - -# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: - enabled: true - -clients: - enabled: true - # The port which the PushProx client will post PushProx metrics to - port: 9369 - # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} - # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null - proxyUrl: "" - # If set to true, the client will forward any requests from the host IP to 127.0.0.1 - # It will only allow proxy requests to the metricsPort specified - useLocalhost: false - # Configuration for accessing metrics via HTTPS - https: - # Does the client require https to access the metrics? - enabled: false - # If set to true, the client will create a service account with adequate permissions and set a flag - # on the client to use the service account token provided by it to make authorized scrape requests - useServiceAccountCredentials: false - # If set to true, the client will disable SSL security checks - insecureSkipVerify: false - # Directory on host where necessary TLS cert and key to scrape metrics can be found - certDir: "" - # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings - certFile: "" - keyFile: "" - caCertFile: "" - - # Resource limits - resources: {} - - # Options to select all nodes to deploy client DaemonSet on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client - command: ["pushprox-client"] - - copyCertsImage: - repository: rancher/mirrored-library-busybox - tag: 1.31.1 - -proxy: - enabled: true - # The port through which PushProx clients will communicate to the proxy - port: 8080 - - # Resource limits - resources: {} - - # 
Options to select a node to run a single proxy deployment on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy - command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/README.md deleted file mode 100644 index dcecc69daef..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# rancher-pushprox - -A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. - -Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. - -Using an instance of this chart is suitable for the following scenarios: -- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) -- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) -- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` -- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) -- You need to scrape metrics without access to cacerts (i.e. 
enable `insecureSkipVerify`) - -The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. - -## Configuration - -The following tables list the configurable parameters of the rancher-pushprox chart and their default values. - -### General - -#### Required -| Parameter | Description | Example | -| ----- | ----------- | ------ | -| `component` | The component that is being monitored | `kube-etcd` -| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | - -#### Optional -| Parameter | Description | Default | -| ----- | ----------- | ------ | -| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | -| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | -| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | -| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | -| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | -| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | -| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | -| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | -| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | -| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | -| `clients.resources` | Set resource limits and requests for the client container | `{}` | -| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | -| `clients.tolerations` | Specify tolerations for clients | `[]` | -| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | -| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | -| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | -| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | -| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | - -*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. - -See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/_helpers.tpl deleted file mode 100644 index f77b8edf4f1..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/_helpers.tpl +++ /dev/null @@ -1,87 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# Windows Support - -{{/* -Windows cluster will add default taint for linux nodes, -add below linux tolerations to workloads could be scheduled to those linux nodes -*/}} - -{{- define "linux-node-tolerations" -}} -- key: "cattle.io/os" - value: "linux" - effect: "NoSchedule" - operator: "Equal" -{{- end -}} - -{{- define "linux-node-selector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: linux -{{- else -}} -kubernetes.io/os: linux -{{- end -}} -{{- end -}} - -# General - -{{- define "pushprox.namespace" -}} - {{- if .Values.namespaceOverride -}} - {{- .Values.namespaceOverride -}} - {{- else -}} - {{- .Release.Namespace -}} - {{- end -}} -{{- end -}} - -{{- define "pushProxy.commonLabels" -}} -release: {{ .Release.Name }} -component: {{ .Values.component | quote }} -provider: kubernetes -{{- end -}} - -{{- define "pushProxy.proxyUrl" -}} -{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} -{{- if .Values.clients.proxyUrl -}} -{{ printf "%s" .Values.clients.proxyUrl }} -{{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} -{{- end -}}{{- end -}} - -# Client - -{{- define "pushProxy.client.name" -}} -{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.client.labels" -}} -k8s-app: {{ template "pushProxy.client.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# Proxy - -{{- define "pushProxy.proxy.name" -}} -{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.proxy.labels" -}} -k8s-app: {{ template "pushProxy.proxy.name" . }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} - -# ServiceMonitor - -{{- define "pushprox.serviceMonitor.name" -}} -{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} -{{- end -}} - -{{- define "pushProxy.serviceMonitor.labels" -}} -app: {{ template "pushprox.serviceMonitor.name" . }} -release: {{ .Release.Name | quote }} -{{ template "pushProxy.commonLabels" . }} -{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml deleted file mode 100644 index 95346dee645..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml +++ /dev/null @@ -1,74 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.client.name" . 
}} -{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} -- nonResourceURLs: ["/metrics"] - verbs: ["get"] -{{- end }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.client.name" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.client.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} -spec: - privileged: false - hostNetwork: true - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 0 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - - 'emptyDir' - - 'hostPath' - allowedHostPaths: - - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - readOnly: true -{{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-clients.yaml deleted file mode 100644 index ed78792e5d9..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-clients.yaml +++ /dev/null @@ -1,135 +0,0 @@ -{{- if .Values.clients }}{{- if .Values.clients.enabled }} -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} - pushprox-exporter: "client" -spec: - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} - template: - metadata: - labels: {{ include "pushProxy.client.labels" . | nindent 8 }} - spec: - nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector | indent 8 }} -{{- end }} - tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} -{{- if .Values.clients.tolerations }} -{{ toYaml .Values.clients.tolerations | indent 8 }} -{{- end }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: {{ template "pushProxy.client.name" . }} - containers: - - name: pushprox-client - image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} - command: - {{- range .Values.clients.command }} - - {{ . | quote }} - {{- end }} - args: - - --fqdn=$(HOST_IP) - - --proxy-url=$(PROXY_URL) - - --metrics-addr=$(PORT) - - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} - {{- if .Values.clients.useLocalhost }} - - --use-localhost - {{- end }} - {{- if .Values.clients.https.enabled }} - {{- if .Values.clients.https.insecureSkipVerify }} - - --insecure-skip-verify - {{- end }} - {{- if .Values.clients.https.useServiceAccountCredentials }} - - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token - {{- end }} - {{- if .Values.clients.https.certDir }} - - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem - - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem - - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem - {{- end }} - {{- end }} - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: PORT - value: :{{ .Values.clients.port }} - - name: PROXY_URL - value: {{ template "pushProxy.proxyUrl" . }} - securityContext: - runAsNonRoot: true - runAsUser: 1000 - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - volumeMounts: - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - {{- end }} - {{- if .Values.clients.resources }} - resources: {{ toYaml .Values.clients.resources | nindent 10 }} - {{- end }} - {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} - initContainers: - - name: copy-certs - image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} - command: - - sh - - -c - - | - echo "Searching for files to copy within the source volume" - echo "cert: ${CERT_FILE_NAME}" - echo "key: ${KEY_FILE_NAME}" - echo "cacert: ${CACERT_FILE_NAME}" - - CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) - KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) - CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) - - test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 - test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 - test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 - - echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" - cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 - chmod 444 $CERT_FILE_TARGET || exit 1 - - echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" - cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 - chmod 444 $KEY_FILE_TARGET || exit 1 - - echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" - cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 - chmod 444 $CACERT_FILE_TARGET || exit 1 - env: - - name: CERT_FILE_NAME - value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} - - name: KEY_FILE_NAME - value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} - - name: CACERT_FILE_NAME - value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} - - name: CERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy.pem - - name: KEY_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-key.pem - - name: CACERT_FILE_TARGET - value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem - securityContext: - 
runAsNonRoot: false - volumeMounts: - - name: metrics-cert-dir-source - mountPath: /etc/source - readOnly: true - - name: metrics-cert-dir - mountPath: /etc/ssl/push-proxy - volumes: - - name: metrics-cert-dir-source - hostPath: - path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} - - name: metrics-cert-dir - emptyDir: {} - {{- end }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml deleted file mode 100644 index a3509c16013..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -rules: -- apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: - - {{ template "pushProxy.proxy.name" . }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "pushProxy.proxy.name" . }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "pushProxy.proxy.name" . }} -subjects: - - kind: ServiceAccount - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} ---- -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} - labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} -spec: - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'MustRunAsNonRoot' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - - min: 1 - max: 65535 - readOnlyRootFilesystem: false - volumes: - - 'secret' -{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml deleted file mode 100644 index 2f3d7e54c94..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ template "pushprox.serviceMonitor.name" . }} - namespace: {{ template "pushprox.namespace" . }} - labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} -spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} - jobLabel: component - podTargetLabels: - - component - - pushprox-exporter - namespaceSelector: - matchNames: - - {{ template "pushprox.namespace" . }} - selector: - matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ template "pushProxy.client.name" . }} - namespace: {{ template "pushprox.namespace" . 
}} - labels: {{ include "pushProxy.client.labels" . | nindent 4 }} -spec: - ports: - - name: metrics - port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} - protocol: TCP - targetPort: {{ .Values.metricsPort }} - selector: {{ include "pushProxy.client.labels" . | nindent 4 }} -{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/values.yaml deleted file mode 100644 index e1bcf79a5b7..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/values.yaml +++ /dev/null @@ -1,86 +0,0 @@ -# Default values for rancher-pushprox. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -# Default image containing both the proxy and the client was generated from the following Dockerfile -# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 - -# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) -# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, -# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), -# you will need to set the clients / proxy nodeSelector and tolerations accordingly - -# Configuration - -global: - cattle: - systemDefaultRegistry: "" - -# The component that is being monitored (i.e. 
etcd) -component: "component" - -# The port containing the metrics that need to be scraped -metricsPort: 2739 - -# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: - enabled: true - -clients: - enabled: true - # The port which the PushProx client will post PushProx metrics to - port: 9369 - # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} - # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null - proxyUrl: "" - # If set to true, the client will forward any requests from the host IP to 127.0.0.1 - # It will only allow proxy requests to the metricsPort specified - useLocalhost: false - # Configuration for accessing metrics via HTTPS - https: - # Does the client require https to access the metrics? - enabled: false - # If set to true, the client will create a service account with adequate permissions and set a flag - # on the client to use the service account token provided by it to make authorized scrape requests - useServiceAccountCredentials: false - # If set to true, the client will disable SSL security checks - insecureSkipVerify: false - # Directory on host where necessary TLS cert and key to scrape metrics can be found - certDir: "" - # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings - certFile: "" - keyFile: "" - caCertFile: "" - - # Resource limits - resources: {} - - # Options to select all nodes to deploy client DaemonSet on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client - command: ["pushprox-client"] - - copyCertsImage: - repository: rancher/mirrored-library-busybox - tag: 1.31.1 - -proxy: - enabled: true - # The port through which PushProx clients will communicate to the proxy - port: 8080 - - # Resource limits - resources: {} - - # 
Options to select a node to run a single proxy deployment on - nodeSelector: {} - tolerations: [] - - image: - repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy - command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/_helpers.tpl deleted file mode 100644 index 4fc68cf9754..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/_helpers.tpl +++ /dev/null @@ -1,73 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# General - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -The components in this chart create additional resources that expand the longest created name strings. -The longest name that gets created adds and extra 37 characters, so truncation should be 63-35=26. -*/}} -{{- define "windowsExporter.name" -}} -{{ printf "%s-windows-exporter" .Release.Name }} -{{- end -}} - -{{- define "windowsExporter.namespace" -}} -{{- default .Release.Namespace .Values.namespaceOverride -}} -{{- end -}} - -{{- define "windowsExporter.labels" -}} -k8s-app: {{ template "windowsExporter.name" . 
}} -release: {{ .Release.Name }} -component: "windows-exporter" -provider: kubernetes -{{- end -}} - -# Client - -{{- define "windowsExporter.client.nodeSelector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: windows -{{- else -}} -kubernetes.io/os: windows -{{- end -}} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector }} -{{- end }} -{{- end -}} - -{{- define "windowsExporter.client.tolerations" -}} -{{- if .Values.clients.tolerations -}} -{{ toYaml .Values.clients.tolerations }} -{{- else -}} -- operator: Exists -{{- end -}} -{{- end -}} - -{{- define "windowsExporter.client.env" -}} -- name: LISTEN_PORT - value: {{ required "Need .Values.clients.port to figure out where to get metrics from" .Values.clients.port | quote }} -{{- if .Values.clients.enabledCollectors }} -- name: ENABLED_COLLECTORS - value: {{ .Values.clients.enabledCollectors | quote }} -{{- end }} -{{- if .Values.clients.env }} -{{ toYaml .Values.clients.env }} -{{- end }} -{{- end -}} - -{{- define "windowsExporter.validatePathPrefix" -}} -{{- if .Values.global.cattle.rkeWindowsPathPrefix -}} -{{- $prefixPath := (.Values.global.cattle.rkeWindowsPathPrefix | replace "/" "\\") -}} -{{- if (not (hasSuffix "\\" $prefixPath)) -}} -{{- fail (printf ".Values.global.cattle.rkeWindowsPathPrefix must end in '/' or '\\', found %s" $prefixPath) -}} -{{- end -}} -{{- end -}} -{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/etcd.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/etcd.yaml deleted file mode 100644 index 78f230581d8..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/etcd.yaml +++ /dev/null @@ -1,1118 +0,0 @@ -{{- /* -Generated from 'etcd' from 
https://raw.githubusercontent.com/etcd-io/website/master/content/docs/current/op-guide/grafana.json -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -{{- if (include "exporter.kubeEtcd.enabled" .)}} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "etcd" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - etcd.json: |- - { - "annotations": { - "list": [] - }, - "description": "etcd sample Grafana dashboard with Prometheus", - "editable": true, - "gnetId": null, - "hideControls": false, - "links": [], - "refresh": "10s", - "rows": [ - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(245, 54, 54, 0.9)", - "rgba(237, 129, 40, 0.89)", - "rgba(50, 172, 45, 0.97)" - ], - "datasource": "$datasource", - "editable": true, - "error": false, - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "id": 28, - "interval": null, - "isNew": true, - "links": [], - "mappingType": 1, - "mappingTypes": [ - 
{ - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "targets": [ - { - "expr": "sum(etcd_server_has_leader{job=\"$cluster\"})", - "intervalFactor": 2, - "legendFormat": "", - "metric": "etcd_server_has_leader", - "refId": "A", - "step": 20 - } - ], - "thresholds": "", - "title": "Up", - "type": "singlestat", - "valueFontSize": "200%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "avg" - }, - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 0, - "id": 23, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 5, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(grpc_server_started_total{job=\"$cluster\",grpc_type=\"unary\"}[5m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "RPC Rate", - "metric": "grpc_server_started_total", - "refId": "A", - "step": 2 - }, - { - "expr": "sum(rate(grpc_server_handled_total{job=\"$cluster\",grpc_type=\"unary\",grpc_code!=\"OK\"}[5m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "RPC Failed Rate", - "metric": "grpc_server_handled_total", - "refId": "B", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": 
null, - "timeShift": null, - "title": "RPC Rate", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ops", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 0, - "id": 41, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 4, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(grpc_server_started_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})", - "intervalFactor": 2, - "legendFormat": "Watch Streams", - "metric": "grpc_server_handled_total", - "refId": "A", - "step": 4 - }, - { - "expr": "sum(grpc_server_started_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})", - "intervalFactor": 2, - "legendFormat": "Lease Streams", - "metric": "grpc_server_handled_total", - "refId": "B", - "step": 4 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Active Streams", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": 
"graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "showTitle": false, - "title": "Row" - }, - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "decimals": null, - "editable": true, - "error": false, - "fill": 0, - "grid": {}, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 4, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "etcd_mvcc_db_total_size_in_bytes{job=\"$cluster\"}", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}instance{{`}}`}} DB Size", - "metric": "", - "refId": "A", - "step": 4 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "DB Size", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 0, - "grid": {}, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, 
- "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 1, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 4, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=\"$cluster\"}[5m])) by (instance, le))", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{`{{`}}instance{{`}}`}} WAL fsync", - "metric": "etcd_disk_wal_fsync_duration_seconds_bucket", - "refId": "A", - "step": 4 - }, - { - "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket{job=\"$cluster\"}[5m])) by (instance, le))", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}instance{{`}}`}} DB fsync", - "metric": "etcd_disk_backend_commit_duration_seconds_bucket", - "refId": "B", - "step": 4 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Disk Sync Duration", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 0, - "id": 29, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 4, - "stack": false, - "steppedLine": false, - 
"targets": [ - { - "expr": "process_resident_memory_bytes{job=\"$cluster\"}", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}instance{{`}}`}} Resident Memory", - "metric": "process_resident_memory_bytes", - "refId": "A", - "step": 4 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Memory", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "title": "New row" - }, - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 5, - "id": 22, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 3, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "rate(etcd_network_client_grpc_received_bytes_total{job=\"$cluster\"}[5m])", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}instance{{`}}`}} Client Traffic In", - "metric": "etcd_network_client_grpc_received_bytes_total", - "refId": "A", - "step": 4 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Client Traffic In", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - 
"values": [] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 5, - "id": 21, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 3, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "rate(etcd_network_client_grpc_sent_bytes_total{job=\"$cluster\"}[5m])", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}instance{{`}}`}} Client Traffic Out", - "metric": "etcd_network_client_grpc_sent_bytes_total", - "refId": "A", - "step": 4 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Client Traffic Out", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 0, - "id": 20, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - 
"percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(etcd_network_peer_received_bytes_total{job=\"$cluster\"}[5m])) by (instance)", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}instance{{`}}`}} Peer Traffic In", - "metric": "etcd_network_peer_received_bytes_total", - "refId": "A", - "step": 4 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Peer Traffic In", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "decimals": null, - "editable": true, - "error": false, - "fill": 0, - "grid": {}, - "id": 16, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(etcd_network_peer_sent_bytes_total{job=\"$cluster\"}[5m])) by (instance)", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}instance{{`}}`}} Peer Traffic Out", - "metric": "etcd_network_peer_sent_bytes_total", - "refId": "A", - "step": 4 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Peer Traffic Out", - "tooltip": { - "msResolution": false, 
- "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "Bps", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "title": "New row" - }, - { - "collapse": false, - "editable": true, - "height": "250px", - "panels": [ - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 0, - "id": 40, - "isNew": true, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(etcd_server_proposals_failed_total{job=\"$cluster\"}[5m]))", - "intervalFactor": 2, - "legendFormat": "Proposal Failure Rate", - "metric": "etcd_server_proposals_failed_total", - "refId": "A", - "step": 2 - }, - { - "expr": "sum(etcd_server_proposals_pending{job=\"$cluster\"})", - "intervalFactor": 2, - "legendFormat": "Proposal Pending Total", - "metric": "etcd_server_proposals_pending", - "refId": "B", - "step": 2 - }, - { - "expr": "sum(rate(etcd_server_proposals_committed_total{job=\"$cluster\"}[5m]))", - "intervalFactor": 2, - "legendFormat": "Proposal Commit Rate", - "metric": "etcd_server_proposals_committed_total", - "refId": "C", - "step": 2 - }, - { - "expr": "sum(rate(etcd_server_proposals_applied_total{job=\"$cluster\"}[5m]))", - "intervalFactor": 2, - "legendFormat": "Proposal Apply Rate", - "refId": "D", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Raft Proposals", - 
"tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": {}, - "bars": false, - "datasource": "$datasource", - "decimals": 0, - "editable": true, - "error": false, - "fill": 0, - "id": 19, - "isNew": true, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "changes(etcd_server_leader_changes_seen_total{job=\"$cluster\"}[1d])", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}instance{{`}}`}} Total Leader Elections Per Day", - "metric": "etcd_server_leader_changes_seen_total", - "refId": "A", - "step": 2 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Total Leader Elections Per Day", - "tooltip": { - "msResolution": false, - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "title": "New row" - } - ], - "schemaVersion": 13, - "sharedCrosshair": false, - "style": "dark", - "tags": 
[], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [], - "query": "label_values(etcd_server_has_leader, job)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-15m", - "to": "now" - }, - "timepicker": { - "now": true, - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "browser", - "title": "etcd", - "uid": "c2f4e12cdf69feb95caa41a5a1b423d9", - "version": 215 - } -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml deleted file mode 100644 index 6badcf17399..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml +++ /dev/null @@ -1,1772 +0,0 @@ -{{- /* -Generated from 'k8s-resources-pod' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! 
In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-pod" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - k8s-resources-pod.json: |- - { - "annotations": { - "list": [ - - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "links": [ - - ], - "refresh": "10s", - "rows": [ - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "requests", - "color": "#F2495C", - "fill": 0, - "hideTooltip": true, - "legend": true, - "linewidth": 2, - "stack": false - }, - { - "alias": "limits", - "color": "#FF9830", - "fill": 0, - "hideTooltip": true, - 
"legend": true, - "linewidth": 2, - "stack": false - } - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", cluster=\"$cluster\"}) by (container)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}container{{`}}`}}", - "legendLink": null, - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"})\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "requests", - "legendLink": null, - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"})\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "limits", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Usage", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 2, - "legend": { - "avg": false, - "current": true, - "max": true, - "min": false, - "show": true, - "total": false, - "values": false 
- }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(increase(container_cpu_cfs_throttled_periods_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\", cluster=\"$cluster\"}[5m])) by (container) /sum(increase(container_cpu_cfs_periods_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\", cluster=\"$cluster\"}[5m])) by (container)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}container{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 0.25, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Throttling", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Throttling", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - 
"nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "CPU Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "CPU Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Container", - "colorMode": null, - "colors": [ - - ], 
- "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "container", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", 
pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Quota", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Quota", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "requests", - "color": "#F2495C", - "dashes": true, - "fill": 0, - "hideTooltip": true, - "legend": false, - "linewidth": 2, - "stack": false - }, - { - "alias": "limits", - "color": "#FF9830", - "dashes": true, - "fill": 0, - "hideTooltip": true, - "legend": false, - "linewidth": 2, - "stack": false - } - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\", image!=\"\"}) by (container)", - 
"format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}container{{`}}`}}", - "legendLink": null, - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"})\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "requests", - "legendLink": null, - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"})\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "limits", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Usage", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 5, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - 
"dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "Memory Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Memory Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Memory Usage (RSS)", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #F", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - 
}, - { - "alias": "Memory Usage (Cache)", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #G", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Usage (Swap)", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #H", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Container", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "container", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\", image!=\"\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", image!=\"\"}) by (container) / sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", 
namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"\", image!=\"\"}) by (container) / sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - }, - { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "F", - "step": 10 - }, - { - "expr": "sum(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "G", - "step": 10 - }, - { - "expr": "sum(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "H", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Quota", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": 
null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Quota", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 6, - "interval": "1m", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(irate(container_network_receive_bytes_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Receive Bandwidth", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": 
"Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 7, - "interval": "1m", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(irate(container_network_transmit_bytes_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Transmit Bandwidth", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 8, - "interval": "1m", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false 
- }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(irate(container_network_receive_packets_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Rate of Received Packets", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 9, - "interval": "1m", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": 
"sum(irate(container_network_transmit_packets_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Rate of Transmitted Packets", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 10, - "interval": "1m", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(irate(container_network_receive_packets_dropped_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Rate of Received Packets Dropped", 
- "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 11, - "interval": "1m", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(irate(container_network_transmit_packets_dropped_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Rate of Transmitted Packets Dropped", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, 
- "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "default", - "value": "default" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": "", - "value": "" - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": null, - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(kube_pod_info, cluster)", - "refresh": 1, - "regex": "", - "sort": 1, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "", - "value": "" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "namespace", - "options": [ - - ], - "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", - "refresh": 1, - "regex": "", - "sort": 1, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "", - "value": "" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "pod", - "options": [ - - ], - "query": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=\"$namespace\"}, pod)", - "refresh": 2, - "regex": "", - "sort": 1, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": 
"now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "UTC", - "title": "Kubernetes / Compute Resources / Pod", - "uid": "6581e46e4e5c7ba40a07646395ef7b23", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml deleted file mode 100644 index 2b8eac4df64..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml +++ /dev/null @@ -1,959 +0,0 @@ -{{- /* -Generated from 'k8s-cluster-rsrc-use' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! 
In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-cluster-rsrc-use" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - k8s-cluster-rsrc-use.json: |- - { - "annotations": { - "list": [ - - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "links": [ - - ], - "refresh": "10s", - "rows": [ - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "node:cluster_cpu_utilisation:ratio{cluster=\"$cluster\"}", - "format": "time_series", - "intervalFactor": 2, - 
"legendFormat": "{{`{{`}}node{{`}}`}}", - "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Utilisation", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_cpu_saturation_load1:{cluster=\"$cluster\"} / scalar(sum(min(kube_pod_info{cluster=\"$cluster\"}) by (node)))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}node{{`}}`}}", - "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Saturation (Load1)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": 1, - "min": 0, - "show": 
true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "node:cluster_memory_utilisation:ratio{cluster=\"$cluster\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}node{{`}}`}}", - "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Utilisation", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": 
true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_memory_swap_io_bytes:sum_rate{cluster=\"$cluster\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}node{{`}}`}}", - "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Saturation (Swap I/O)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 5, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_disk_utilisation:avg_irate{cluster=\"$cluster\"} / 
scalar(:kube_pod_info_node_count:{cluster=\"$cluster\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}node{{`}}`}}", - "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Disk IO Utilisation", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_disk_saturation:avg_irate{cluster=\"$cluster\"} / scalar(:kube_pod_info_node_count:{cluster=\"$cluster\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}node{{`}}`}}", - "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Disk IO Saturation", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - 
"yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Disk", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 7, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_net_utilisation:sum_irate{cluster=\"$cluster\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}node{{`}}`}}", - "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Net Utilisation (Transmitted)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 8, - "legend": { - "avg": false, - 
"current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_net_saturation:sum_irate{cluster=\"$cluster\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}node{{`}}`}}", - "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Net Saturation (Dropped)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 9, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": 
"sum(max(node_filesystem_size_bytes{fstype=~\"ext[234]|btrfs|xfs|zfs\", cluster=\"$cluster\"} - node_filesystem_avail_bytes{fstype=~\"ext[234]|btrfs|xfs|zfs\", cluster=\"$cluster\"}) by (device,pod,namespace)) by (pod,namespace)\n/ scalar(sum(max(node_filesystem_size_bytes{fstype=~\"ext[234]|btrfs|xfs|zfs\", cluster=\"$cluster\"}) by (device,pod,namespace)))\n* on (namespace, pod) group_left (node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"}\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}node{{`}}`}}", - "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Disk Capacity", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": 1, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Storage", - "titleSize": "h6" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": 
"label_values(:kube_pod_info_node_count:, cluster)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes / USE Method / Cluster", - "uid": "a6e7d1362e1ddbb79db21d5bb40d7137", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-node-rsrc-use.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-node-rsrc-use.yaml deleted file mode 100644 index 10125208692..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-node-rsrc-use.yaml +++ /dev/null @@ -1,986 +0,0 @@ -{{- /* -Generated from 'k8s-node-rsrc-use' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! 
In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-node-rsrc-use" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - k8s-node-rsrc-use.json: |- - { - "annotations": { - "list": [ - - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "links": [ - - ], - "refresh": "10s", - "rows": [ - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_cpu_utilisation:avg1m{cluster=\"$cluster\", node=\"$node\"}", - "format": "time_series", - "intervalFactor": 
2, - "legendFormat": "Utilisation", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Utilisation", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_cpu_saturation_load1:{cluster=\"$cluster\", node=\"$node\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Saturation", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Saturation (Load1)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - 
"repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_memory_utilisation:{cluster=\"$cluster\", node=\"$node\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Memory", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Utilisation", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - 
"spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_memory_swap_io_bytes:sum_rate{cluster=\"$cluster\", node=\"$node\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Swap IO", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Saturation (Swap I/O)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 5, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_disk_utilisation:avg_irate{cluster=\"$cluster\", node=\"$node\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Utilisation", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Disk IO Utilisation", - "tooltip": { - "shared": false, - "sort": 0, - 
"value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_disk_saturation:avg_irate{cluster=\"$cluster\", node=\"$node\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Saturation", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Disk IO Saturation", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Disk", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 
10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 7, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_net_utilisation:sum_irate{cluster=\"$cluster\", node=\"$node\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Utilisation", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Net Utilisation (Transmitted)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_net_saturation:sum_irate{cluster=\"$cluster\", node=\"$node\"}", - "format": "time_series", - "intervalFactor": 2, - 
"legendFormat": "Saturation", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Net Saturation (Dropped)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "Bps", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Net", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 9, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "node:node_filesystem_usage:{cluster=\"$cluster\"}\n* on (namespace, pod) group_left (node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\", node=\"$node\"}\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}device{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Disk Utilisation", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - 
"values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Disk", - "titleSize": "h6" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(:kube_pod_info_node_count:, cluster)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "node", - "multi": false, - "name": "node", - "options": [ - - ], - "query": "label_values(kube_node_info{cluster=\"$cluster\"}, node)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] 
- }, - "timezone": "", - "title": "Kubernetes / USE Method / Node", - "uid": "4ac4f123aae0ff6dbaf4f4f66120033b", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-cluster.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-cluster.yaml deleted file mode 100644 index e068214bc4a..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-cluster.yaml +++ /dev/null @@ -1,1479 +0,0 @@ -{{- /* -Generated from 'k8s-resources-cluster' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-cluster" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - k8s-resources-cluster.json: |- - { - "annotations": { - "list": [ - - ] - }, - "editable": true, - "gnetId": null, - 
"graphTooltip": 0, - "hideControls": false, - "links": [ - - ], - "refresh": "10s", - "rows": [ - { - "collapse": false, - "height": "100px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "format": "percentunit", - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\", cluster=\"$cluster\"}[1m]))", - "format": "time_series", - "instant": true, - "intervalFactor": 2, - "refId": "A" - } - ], - "thresholds": "70,80", - "timeFrom": null, - "timeShift": null, - "title": "CPU Utilisation", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "singlestat", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "format": "percentunit", - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - 
], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) / sum(node:node_num_cpu:sum{cluster=\"$cluster\"})", - "format": "time_series", - "instant": true, - "intervalFactor": 2, - "refId": "A" - } - ], - "thresholds": "70,80", - "timeFrom": null, - "timeShift": null, - "title": "CPU Requests Commitment", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "singlestat", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "format": "percentunit", - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) / sum(node:node_num_cpu:sum{cluster=\"$cluster\"})", - "format": "time_series", - "instant": true, - "intervalFactor": 2, - "refId": "A" - } - ], - "thresholds": "70,80", - "timeFrom": null, - "timeShift": null, - "title": "CPU Limits Commitment", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "singlestat", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": 
true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "format": "percentunit", - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "1 - sum(:node_memory_MemFreeCachedBuffers_bytes:sum{cluster=\"$cluster\"}) / sum(:node_memory_MemTotal_bytes:sum{cluster=\"$cluster\"})", - "format": "time_series", - "instant": true, - "intervalFactor": 2, - "refId": "A" - } - ], - "thresholds": "70,80", - "timeFrom": null, - "timeShift": null, - "title": "Memory Utilisation", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "singlestat", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "format": "percentunit", - "id": 5, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 
1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) / sum(:node_memory_MemTotal_bytes:sum{cluster=\"$cluster\"})", - "format": "time_series", - "instant": true, - "intervalFactor": 2, - "refId": "A" - } - ], - "thresholds": "70,80", - "timeFrom": null, - "timeShift": null, - "title": "Memory Requests Commitment", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "singlestat", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "format": "percentunit", - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) / sum(:node_memory_MemTotal_bytes:sum{cluster=\"$cluster\"})", - "format": "time_series", - "instant": true, - "intervalFactor": 2, - "refId": "A" - } - ], - "thresholds": "70,80", - "timeFrom": null, - "timeShift": null, - "title": "Memory 
Limits Commitment", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "singlestat", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Headlines", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 7, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}namespace{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - 
"max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "Pods", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "link": true, - "linkTooltip": "Drill down to pods", - "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "Workloads", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "link": true, - "linkTooltip": "Drill down to workloads", - "linkUrl": "./d/a87fb0d919ec0ea5f6543124e16c42a5/k8s-resources-workloads-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": 
"Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "CPU Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #F", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #G", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Namespace", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTooltip": "Drill down to pods", - "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell", - "pattern": "namespace", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "count(mixin_pod_workload{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - 
"instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "count(avg(mixin_pod_workload{cluster=\"$cluster\"}) by (workload, namespace)) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "F", - "step": 10 - }, - { - "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "G", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Quota", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": 
null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Quota", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 9, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}namespace{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Usage (w/o cache)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory", - "titleSize": "h6" - }, - { - 
"collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 10, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "Pods", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "link": true, - "linkTooltip": "Drill down to pods", - "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "Workloads", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "link": true, - "linkTooltip": "Drill down to workloads", - "linkUrl": "./d/a87fb0d919ec0ea5f6543124e16c42a5/k8s-resources-workloads-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "Memory Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - 
"decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Memory Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #F", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #G", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Namespace", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTooltip": "Drill down to pods", - "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell", - "pattern": "namespace", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "count(mixin_pod_workload{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "count(avg(mixin_pod_workload{cluster=\"$cluster\"}) by (workload, namespace)) by 
(namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "F", - "step": 10 - }, - { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace) / sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) by (namespace)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "G", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Requests by Namespace", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } 
- ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Requests", - "titleSize": "h6" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(:kube_pod_info_node_count:, cluster)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes / Compute Resources / Cluster", - "uid": "efa86fd1d0c121a26444b636a3f509a8", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-namespace.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-namespace.yaml deleted file mode 100644 index af366473181..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-namespace.yaml +++ /dev/null @@ -1,963 +0,0 @@ -{{- /* -Generated from 
'k8s-resources-namespace' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-namespace" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - k8s-resources-namespace.json: |- - { - "annotations": { - "list": [ - - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "links": [ - - ], - "refresh": "10s", - "rows": [ - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - 
"span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod_name)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod_name{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Usage", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "CPU Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value 
#A", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "CPU Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Pod", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTooltip": "Drill down", - "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", - "pattern": "pod", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": 
"sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Quota", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": 
[ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Quota", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"}) by (pod_name)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod_name{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Usage (w/o cache)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Usage", - "titleSize": "h6" - }, - { - 
"collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "Memory Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Memory Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": 
"YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Memory Usage (RSS)", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #F", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Usage (Cache)", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #G", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Usage (Swap", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #H", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Pod", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTooltip": "Drill down", - "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", - "pattern": "pod", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - 
"refId": "A", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\"}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\"}) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - }, - { - "expr": "sum(label_replace(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "F", - "step": 10 - }, - { - "expr": "sum(label_replace(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "G", - "step": 10 - }, - { - "expr": "sum(label_replace(container_memory_swap{cluster=\"$cluster\", 
namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "H", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Quota", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Quota", - "titleSize": "h6" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(:kube_pod_info_node_count:, cluster)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "namespace", - "multi": false, - "name": 
"namespace", - "options": [ - - ], - "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes / Compute Resources / Namespace (Pods)", - "uid": "85a562078cdf77779eaa1add43ccec1e", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-pod.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-pod.yaml deleted file mode 100644 index 536a2c704fe..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-pod.yaml +++ /dev/null @@ -1,1006 +0,0 @@ -{{- /* -Generated from 'k8s-resources-pod' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! 
In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-pod" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - k8s-resources-pod.json: |- - { - "annotations": { - "list": [ - - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "links": [ - - ], - "refresh": "10s", - "rows": [ - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", 
pod_name=\"$pod\", container_name!=\"POD\", cluster=\"$cluster\"}) by (container_name)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}container_name{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Usage", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "CPU Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests", - "colorMode": null, - "colors": 
[ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "CPU Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Container", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "container", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, 
- { - "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Quota", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - 
"label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Quota", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", container_name!=\"\"}) by (container_name)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}container_name{{`}}`}} (RSS)", - "legendLink": null, - "step": 10 - }, - { - "expr": "sum(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", container_name!=\"\"}) by (container_name)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}container_name{{`}}`}} (Cache)", - "legendLink": null, - "step": 10 - }, - { - "expr": "sum(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", container_name!=\"\"}) by (container_name)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}container_name{{`}}`}} (Swap)", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Usage", - "tooltip": { - "shared": false, - "sort": 0, - 
"value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Usage", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "Memory Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - 
"link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Memory Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Memory Usage (RSS)", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #F", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Usage (Cache)", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #G", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Usage (Swap", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #H", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Container", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "container", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": 
"YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", container_name!=\"\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", pod=\"$pod\"}) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - }, - { - "expr": 
"sum(label_replace(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name != \"\", container_name != \"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "F", - "step": 10 - }, - { - "expr": "sum(label_replace(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name != \"\", container_name != \"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "G", - "step": 10 - }, - { - "expr": "sum(label_replace(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name != \"\", container_name != \"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "H", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Quota", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Quota", - "titleSize": "h6" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": 
null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(:kube_pod_info_node_count:, cluster)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "namespace", - "multi": false, - "name": "namespace", - "options": [ - - ], - "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "pod", - "multi": false, - "name": "pod", - "options": [ - - ], - "query": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=\"$namespace\"}, pod)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes / Compute Resources / Pod", - "uid": "6581e46e4e5c7ba40a07646395ef7b23", - "version": 
0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-workload.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-workload.yaml deleted file mode 100644 index f5844b505ff..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-workload.yaml +++ /dev/null @@ -1,936 +0,0 @@ -{{- /* -Generated from 'k8s-resources-workload' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-workload" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - k8s-resources-workload.json: |- - { - "annotations": { - "list": [ - - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "links": [ - - ], - "refresh": "10s", - "rows": [ - { - "collapse": false, - "height": 
"250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Usage", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - 
"id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "CPU Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "CPU Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - 
"alias": "Pod", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTooltip": "Drill down", - "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", - "pattern": "pod", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n 
kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Quota", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - 
"label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Quota", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n ) by (pod)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": 
false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Usage", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "Memory Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Memory Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value 
#D", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Pod", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTooltip": "Drill down", - "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", - "pattern": "pod", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n ) by (pod)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(\n label_replace(\n 
container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n ) by (pod)\n/sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n ) by (pod)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - } - ], - "thresholds": [ - - ], - 
"timeFrom": null, - "timeShift": null, - "title": "Memory Quota", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Quota", - "titleSize": "h6" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(:kube_pod_info_node_count:, cluster)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "namespace", - "multi": false, - "name": "namespace", - "options": [ - - ], - "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, 
- { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "workload", - "multi": false, - "name": "workload", - "options": [ - - ], - "query": "label_values(mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}, workload)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "type", - "multi": false, - "name": "type", - "options": [ - - ], - "query": "label_values(mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\"}, workload_type)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes / Compute Resources / Workload", - "uid": "a164a7f0339f99e89cea5cb47e9be617", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml deleted file mode 100644 index 8a8b5077bce..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml +++ /dev/null @@ -1,972 +0,0 @@ -{{- /* -Generated from 
'k8s-resources-workloads-namespace' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-workloads-namespace" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - k8s-resources-workloads-namespace.json: |- - { - "annotations": { - "list": [ - - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "links": [ - - ], - "refresh": "10s", - "rows": [ - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 1, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], 
- "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}workload{{`}}`}} - {{`{{`}}workload_type{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Usage", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD 
HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "Running Pods", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "CPU Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "CPU Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #F", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Workload", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTooltip": "Drill down", - "linkUrl": 
"./d/a164a7f0339f99e89cea5cb47e9be617/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=$__cell&var-type=$__cell_2", - "pattern": "workload", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "Workload Type", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "workload_type", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "count(mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}) by (workload, workload_type)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(\n label_replace(\n 
namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - }, - { - "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "F", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Quota", - "tooltip": { - "shared": false, - "sort": 0, - 
"value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Quota", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 10, - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 0, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n ) by (workload, workload_type)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}workload{{`}}`}} - {{`{{`}}workload_type{{`}}`}}", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": 
[ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Usage", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 4, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, - { - "alias": "Running Pods", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 0, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "Memory Usage", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Requests", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, 
- { - "alias": "Memory Requests %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Memory Limits", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ - - ], - "type": "number", - "unit": "bytes" - }, - { - "alias": "Memory Limits %", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #F", - "thresholds": [ - - ], - "type": "number", - "unit": "percentunit" - }, - { - "alias": "Workload", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTooltip": "Drill down", - "linkUrl": "./d/a164a7f0339f99e89cea5cb47e9be617/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=$__cell&var-type=$__cell_2", - "pattern": "workload", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "Workload Type", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "workload_type", - "thresholds": [ - - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ - - ], - "type": "string", - "unit": "short" - } - ], - "targets": [ - { - "expr": "count(mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}) by (workload, workload_type)", - "format": "table", - 
"instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n ) by (workload, workload_type)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "C", - "step": 10 - }, - { - "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n ) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 - }, - { - "expr": "sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", - "format": "table", - 
"instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - }, - { - "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n ) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "F", - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Quota", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transform": "table", - "type": "table", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Memory Quota", - "titleSize": "h6" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": {{ 
if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(:kube_pod_info_node_count:, cluster)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - "text": "prod", - "value": "prod" - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "namespace", - "multi": false, - "name": "namespace", - "options": [ - - ], - "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", - "refresh": 1, - "regex": "", - "sort": 2, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes / Compute Resources / Namespace (Workloads)", - "uid": "a87fb0d919ec0ea5f6543124e16c42a5", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/nodes.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/nodes.yaml deleted file mode 100644 index 17a97dae198..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/nodes.yaml +++ /dev/null @@ -1,1383 +0,0 @@ -{{- /* -Generated from 'nodes' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! 
In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "nodes" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - nodes.json: |- - { - "__inputs": [ - - ], - "__requires": [ - - ], - "annotations": { - "list": [ - - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "id": null, - "links": [ - - ], - "refresh": "", - "rows": [ - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 2, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"max(node_load1{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "load 1m", - "refId": "A" - }, - { - "expr": "max(node_load5{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "load 5m", - "refId": "B" - }, - { - "expr": "max(node_load15{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "load 15m", - "refId": "C" - }, - { - "expr": "count(node_cpu_seconds_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\", mode=\"user\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "logical cores", - "refId": "D" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "System load", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 3, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": 
false, - "targets": [ - { - "expr": "sum by (cpu) (irate(node_cpu_seconds_total{cluster=\"$cluster\", job=\"node-exporter\", mode!=\"idle\", instance=\"$instance\"}[5m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}cpu{{`}}`}}", - "refId": "A" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Usage Per Core", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 4, - "legend": { - "alignAsTable": "true", - "avg": "true", - "current": "true", - "max": "false", - "min": "false", - "rightSide": "true", - "show": "true", - "total": "false", - "values": "true" - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 9, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max (sum by (cpu) (irate(node_cpu_seconds_total{cluster=\"$cluster\", job=\"node-exporter\", mode!=\"idle\", instance=\"$instance\"}[2m])) ) * 100\n", - "format": "time_series", - "intervalFactor": 10, - "legendFormat": "{{`{{`}} cpu {{`}}`}}", - 
"refId": "A" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Utilization", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percent", - "label": null, - "logBase": 1, - "max": 100, - "min": 0, - "show": true - }, - { - "format": "percent", - "label": null, - "logBase": 1, - "max": 100, - "min": 0, - "show": true - } - ] - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "$datasource", - "format": "percent", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": true, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - - }, - "id": 5, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "avg(sum by (cpu) (irate(node_cpu_seconds_total{cluster=\"$cluster\", job=\"node-exporter\", mode!=\"idle\", instance=\"$instance\"}[2m]))) * 100\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "80, 90", - "title": "CPU Usage", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - 
"text": "N/A", - "value": "null" - } - ], - "valueName": "current" - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 6, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 9, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(\n node_memory_MemTotal_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_MemFree_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_Buffers_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_Cached_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "memory used", - "refId": "A" - }, - { - "expr": "max(node_memory_Buffers_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "memory buffers", - "refId": "B" - }, - { - "expr": "max(node_memory_Cached_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "memory cached", - "refId": "C" - }, - { - "expr": "max(node_memory_MemFree_bytes{cluster=\"$cluster\", 
job=\"node-exporter\", instance=\"$instance\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "memory free", - "refId": "D" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "$datasource", - "format": "percent", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": true, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - - }, - "id": 7, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "max(\n (\n (\n node_memory_MemTotal_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_MemFree_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_Buffers_bytes{cluster=\"$cluster\", job=\"node-exporter\", 
instance=\"$instance\"}\n - node_memory_Cached_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n )\n / node_memory_MemTotal_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n ) * 100)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "80, 90", - "title": "Memory Usage", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 8, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - { - "alias": "read", - "yaxis": 1 - }, - { - "alias": "io time", - "yaxis": 2 - } - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(rate(node_disk_read_bytes_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}[2m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "read", - "refId": "A" - }, - { - "expr": "max(rate(node_disk_written_bytes_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}[2m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "written", - "refId": "B" - 
}, - { - "expr": "max(rate(node_disk_io_time_seconds_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}[2m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "io time", - "refId": "C" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Disk I/O", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 9, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max by (namespace, pod, device) ((node_filesystem_size_bytes{cluster=\"$cluster\", fstype=~\"ext[234]|btrfs|xfs|zfs\", instance=\"$instance\", job=\"node-exporter\"} - node_filesystem_avail_bytes{cluster=\"$cluster\", fstype=~\"ext[234]|btrfs|xfs|zfs\", instance=\"$instance\", job=\"node-exporter\"}) / node_filesystem_size_bytes{cluster=\"$cluster\", fstype=~\"ext[234]|btrfs|xfs|zfs\", instance=\"$instance\", job=\"node-exporter\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "disk used", - "refId": "A" - }, - { - "expr": "max by 
(namespace, pod, device) (node_filesystem_avail_bytes{cluster=\"$cluster\", fstype=~\"ext[234]|btrfs|xfs|zfs\", instance=\"$instance\", job=\"node-exporter\"} / node_filesystem_size_bytes{cluster=\"$cluster\", fstype=~\"ext[234]|btrfs|xfs|zfs\", instance=\"$instance\", job=\"node-exporter\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "disk free", - "refId": "B" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Disk Space Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 10, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(rate(node_network_receive_bytes_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\", device!~\"lo\"}[5m]))", - "format": 
"time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}device{{`}}`}}", - "refId": "A" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Network Received", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 11, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 6, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(rate(node_network_transmit_bytes_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\", device!~\"lo\"}[5m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{`{{`}}device{{`}}`}}", - "refId": "A" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Network Transmitted", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, 
- { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 12, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 9, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(\n node_filesystem_files{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_filesystem_files_free{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "inodes used", - "refId": "A" - }, - { - "expr": "max(node_filesystem_files_free{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "inodes free", - "refId": "B" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Inodes Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - 
"label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "$datasource", - "format": "percent", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": true, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - - }, - "id": 13, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "max(\n (\n (\n node_filesystem_files{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_filesystem_files_free{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n )\n / node_filesystem_files{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n ) * 100)\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "80, 90", - "title": "Inodes Usage", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - 
"kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(kube_pod_info, cluster)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "instance", - "options": [ - - ], - "query": "label_values(node_boot_time_seconds{cluster=\"$cluster\", job=\"node-exporter\"}, instance)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes / Nodes", - "uid": "fa49a4706d07a042595b664c87fb33ea", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/persistentvolumesusage.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/persistentvolumesusage.yaml deleted file mode 100644 index f6bc2955c0d..00000000000 --- 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/persistentvolumesusage.yaml +++ /dev/null @@ -1,573 +0,0 @@ -{{- /* -Generated from 'persistentvolumesusage' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "persistentvolumesusage" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - persistentvolumesusage.json: |- - { - "__inputs": [ - - ], - "__requires": [ - - ], - "annotations": { - "list": [ - - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "id": null, - "links": [ - - ], - "refresh": "", - "rows": [ - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 2, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": true, - "min": true, - 
"rightSide": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 9, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "(\n sum without(instance, node) (kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n -\n sum without(instance, node) (kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n)\n", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Used Space", - "refId": "A" - }, - { - "expr": "sum without(instance, node) (kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Free Space", - "refId": "B" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Volume Space Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - } - ] - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "$datasource", - "format": "percent", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": true, - "thresholdLabels": 
false, - "thresholdMarkers": true - }, - "gridPos": { - - }, - "id": 3, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "(\n kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n -\n kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n)\n/\nkubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n* 100\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "80, 90", - "title": "Volume Space Usage", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 4, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": true, - "min": true, - "rightSide": false, - 
"show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 9, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "sum without(instance, node) (kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "Used inodes", - "refId": "A" - }, - { - "expr": "(\n sum without(instance, node) (kubelet_volume_stats_inodes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n -\n sum without(instance, node) (kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n)\n", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": " Free inodes", - "refId": "B" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Volume inodes Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - } - ] - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "rgba(50, 172, 45, 0.97)", - "rgba(237, 129, 40, 0.89)", - "rgba(245, 54, 54, 0.9)" - ], - "datasource": "$datasource", - "format": "percent", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": true, - "thresholdLabels": false, - "thresholdMarkers": true - 
}, - "gridPos": { - - }, - "id": 5, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n/\nkubelet_volume_stats_inodes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n* 100\n", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "80, 90", - "title": "Volume inodes Usage", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "N/A", - "value": "null" - } - ], - "valueName": "current" - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": 
"cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(kubelet_volume_stats_capacity_bytes, cluster)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "Namespace", - "multi": false, - "name": "namespace", - "options": [ - - ], - "query": "label_values(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\"}, namespace)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "PersistentVolumeClaim", - "multi": false, - "name": "volume", - "options": [ - - ], - "query": "label_values(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\"}, persistentvolumeclaim)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-7d", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes / Persistent Volumes", - "uid": "919b92a8e8041bd567af9edab12c840c", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/pods.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/pods.yaml deleted file mode 100644 index 
3b1e1539dd8..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/pods.yaml +++ /dev/null @@ -1,680 +0,0 @@ -{{- /* -Generated from 'pods' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "pods" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - pods.json: |- - { - "__inputs": [ - - ], - "__requires": [ - - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "$datasource", - "enable": true, - "expr": "time() == BOOL timestamp(rate(kube_pod_container_status_restarts_total{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[2m]) > 0)", - "hide": false, - "iconColor": "rgba(215, 44, 44, 1)", - "name": "Restarts", - "showIn": 0, - "tags": [ - "restart" - ], - "type": "rows" - } - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "id": null, - "links": [ - - ], - "refresh": "", 
- "rows": [ - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 2, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by(container_name) (container_memory_usage_bytes{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name=~\"$container\", container_name!=\"POD\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Current: {{`{{`}} container_name {{`}}`}}", - "refId": "A" - }, - { - "expr": "sum by(container) (kube_pod_container_resource_requests{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\", pod=\"$pod\", container=~\"$container\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Requested: {{`{{`}} container {{`}}`}}", - "refId": "B" - }, - { - "expr": "sum by(container) (kube_pod_container_resource_limits{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\", pod=\"$pod\", container=~\"$container\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Limit: {{`{{`}} container {{`}}`}}", - "refId": "C" - }, - { - "expr": "sum by(container_name) (container_memory_cache{job=\"kubelet\", namespace=\"$namespace\", pod_name=~\"$pod\", container_name=~\"$container\", container_name!=\"POD\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": 
"Cache: {{`{{`}} container_name {{`}}`}}", - "refId": "D" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Memory Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 3, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum by (container_name) (rate(container_cpu_usage_seconds_total{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", image!=\"\", pod_name=\"$pod\", container_name=~\"$container\", container_name!=\"POD\"}[1m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Current: {{`{{`}} container_name {{`}}`}}", - "refId": "A" - }, - { - "expr": "sum by(container) (kube_pod_container_resource_requests{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", 
resource=\"cpu\", pod=\"$pod\", container=~\"$container\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Requested: {{`{{`}} container {{`}}`}}", - "refId": "B" - }, - { - "expr": "sum by(container) (kube_pod_container_resource_limits{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\", pod=\"$pod\", container=~\"$container\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Limit: {{`{{`}} container {{`}}`}}", - "refId": "C" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 4, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sort_desc(sum by (pod_name) 
(rate(container_network_receive_bytes_total{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\"}[1m])))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "RX: {{`{{`}} pod_name {{`}}`}}", - "refId": "A" - }, - { - "expr": "sort_desc(sum by (pod_name) (rate(container_network_transmit_bytes_total{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\"}[1m])))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "TX: {{`{{`}} pod_name {{`}}`}}", - "refId": "B" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Network I/O", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "bytes", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 5, - "legend": { - "alignAsTable": true, - "avg": true, - "current": true, - "max": false, - "min": false, - "rightSide": true, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max by 
(container) (kube_pod_container_status_restarts_total{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "Restarts: {{`{{`}} container {{`}}`}}", - "refId": "A" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Total Restarts Per Container", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(kube_pod_info, cluster)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "Namespace", - "multi": false, - 
"name": "namespace", - "options": [ - - ], - "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "Pod", - "multi": false, - "name": "pod", - "options": [ - - ], - "query": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=~\"$namespace\"}, pod)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": true, - "label": "Container", - "multi": false, - "name": "container", - "options": [ - - ], - "query": "label_values(kube_pod_container_info{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}, container)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes / Pods", - "uid": "ab4f13a9892a76a4d21ce8c2445bf4ea", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/statefulset.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/statefulset.yaml deleted file mode 100644 index 01dbf12653c..00000000000 --- 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/statefulset.yaml +++ /dev/null @@ -1,926 +0,0 @@ -{{- /* -Generated from 'statefulset' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "statefulset" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} - labels: - {{- if $.Values.grafana.sidecar.dashboards.label }} - {{ $.Values.grafana.sidecar.dashboards.label }}: "1" - {{- end }} - app: {{ template "kube-prometheus-stack.name" $ }}-grafana -{{ include "kube-prometheus-stack.labels" $ | indent 4 }} -data: - statefulset.json: |- - { - "__inputs": [ - - ], - "__requires": [ - - ], - "annotations": { - "list": [ - - ] - }, - "editable": false, - "gnetId": null, - "graphTooltip": 0, - "hideControls": false, - "id": null, - "links": [ - - ], - "refresh": "", - "rows": [ - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": 
true - }, - "gridPos": { - - }, - "id": 2, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "cores", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 4, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(rate(container_cpu_usage_seconds_total{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"$statefulset.*\"}[3m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "CPU", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - - }, - "id": 3, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "GB", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 4, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - 
"lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(container_memory_usage_bytes{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"$statefulset.*\"}) / 1024^3", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "Memory", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - - }, - "id": 4, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "Bps", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 4, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "lineColor": "rgb(31, 120, 193)", - "show": true - }, - "tableColumn": "", - "targets": [ - { - "expr": "sum(rate(container_network_transmit_bytes_total{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"$statefulset.*\"}[3m])) + sum(rate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\",pod_name=~\"$statefulset.*\"}[3m]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "Network", - "tooltip": { - "shared": 
false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "current" - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "height": "100px", - "panels": [ - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - - }, - "id": 5, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "max(kube_statefulset_replicas{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "Desired Replicas", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - 
"#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - - }, - "id": 6, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "min(kube_statefulset_status_replicas_current{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "Replicas of current version", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - - }, - "id": 7, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - 
"nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "max(kube_statefulset_status_observed_generation{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "Observed Generation", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "current" - }, - { - "cacheTimeout": null, - "colorBackground": false, - "colorValue": false, - "colors": [ - "#299c46", - "rgba(237, 129, 40, 0.89)", - "#d44a3a" - ], - "datasource": "$datasource", - "format": "none", - "gauge": { - "maxValue": 100, - "minValue": 0, - "show": false, - "thresholdLabels": false, - "thresholdMarkers": true - }, - "gridPos": { - - }, - "id": 8, - "interval": null, - "links": [ - - ], - "mappingType": 1, - "mappingTypes": [ - { - "name": "value to text", - "value": 1 - }, - { - "name": "range to text", - "value": 2 - } - ], - "maxDataPoints": 100, - "nullPointMode": "connected", - "nullText": null, - "postfix": "", - "postfixFontSize": "50%", - "prefix": "", - "prefixFontSize": "50%", - "rangeMaps": [ - { - "from": "null", - "text": "N/A", - "to": "null" - } - ], - "span": 3, - "sparkline": { - "fillColor": "rgba(31, 118, 189, 0.18)", - "full": false, - "lineColor": "rgb(31, 120, 193)", - "show": false - }, - "tableColumn": "", - "targets": [ - { - "expr": "max(kube_statefulset_metadata_generation{job=\"kube-state-metrics\", 
statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A" - } - ], - "thresholds": "", - "title": "Metadata Generation", - "tooltip": { - "shared": false - }, - "type": "singlestat", - "valueFontSize": "80%", - "valueMaps": [ - { - "op": "=", - "text": "0", - "value": "null" - } - ], - "valueName": "current" - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - }, - { - "collapse": false, - "collapsed": false, - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - - }, - "id": 9, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "seriesOverrides": [ - - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "max(kube_statefulset_replicas{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "replicas specified", - "refId": "A" - }, - { - "expr": "max(kube_statefulset_status_replicas{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "replicas created", - "refId": "B" - }, - { - "expr": "min(kube_statefulset_status_replicas_ready{job=\"kube-state-metrics\", 
statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "ready", - "refId": "C" - }, - { - "expr": "min(kube_statefulset_status_replicas_current{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "replicas of current version", - "refId": "D" - }, - { - "expr": "min(kube_statefulset_status_replicas_updated{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "updated", - "refId": "E" - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "Replicas", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": false, - "title": "Dashboard Row", - "titleSize": "h6", - "type": "row" - } - ], - "schemaVersion": 14, - "style": "dark", - "tags": [ - "kubernetes-mixin" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [ - - ], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ 
else }}2{{ end }}, - "includeAll": false, - "label": "cluster", - "multi": false, - "name": "cluster", - "options": [ - - ], - "query": "label_values(kube_statefulset_metadata_generation, cluster)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "Namespace", - "multi": false, - "name": "namespace", - "options": [ - - ], - "query": "label_values(kube_statefulset_metadata_generation{job=\"kube-state-metrics\"}, namespace)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allValue": null, - "current": { - - }, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": "Name", - "multi": false, - "name": "statefulset", - "options": [ - - ], - "query": "label_values(kube_statefulset_metadata_generation{job=\"kube-state-metrics\", namespace=\"$namespace\"}, statefulset)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [ - - ], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Kubernetes / StatefulSets", - "uid": "a31c1f46e6f727cb37c0d731a7245005", - "version": 0 - } -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/alertmanager.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/alertmanager.rules.yaml deleted file mode 
100644 index 71159849c02..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/alertmanager.rules.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- /* -Generated from 'alertmanager.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.alertmanager }} -{{- $operatorJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "operator" }} -{{- $alertmanagerJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager" }} -{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager.rules" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: alertmanager.rules - rules: - - alert: AlertmanagerConfigInconsistent - annotations: - message: The configuration of the instances of the Alertmanager cluster `{{`{{`}}$labels.service{{`}}`}}` are out of sync. 
- expr: count_values("config_hash", alertmanager_config_hash{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"}) BY (service) / ON(service) GROUP_LEFT() label_replace(max(prometheus_operator_spec_replicas{job="{{ $operatorJob }}",namespace="{{ $namespace }}",controller="alertmanager"}) by (name, job, namespace, controller), "service", "$1", "name", "(.*)") != 1 - for: 5m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: AlertmanagerFailedReload - annotations: - message: Reloading Alertmanager's configuration has failed for {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod{{`}}`}}. - expr: alertmanager_config_last_reload_successful{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"} == 0 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: AlertmanagerMembersInconsistent - annotations: - message: Alertmanager has not found all other members of the cluster. 
- expr: |- - alertmanager_cluster_members{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"} - != on (service) GROUP_LEFT() - count by (service) (alertmanager_cluster_members{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"}) - for: 5m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/etcd.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/etcd.yaml deleted file mode 100644 index 048410bc3b0..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/etcd.yaml +++ /dev/null @@ -1,181 +0,0 @@ -{{- /* -Generated from 'etcd' group from https://raw.githubusercontent.com/etcd-io/website/master/content/docs/v3.4.0/op-guide/etcd3_alert.rules.yml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.etcd }} -{{- if (include "exporter.kubeEtcd.enabled" .)}} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "etcd" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: etcd - rules: - - alert: etcdInsufficientMembers - annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": insufficient members ({{`{{`}} $value {{`}}`}}).' - expr: sum(up{job=~".*etcd.*"} == bool 1) by (job) < ((count(up{job=~".*etcd.*"}) by (job) + 1) / 2) - for: 3m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdNoLeader - annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": member {{`{{`}} $labels.instance {{`}}`}} has no leader.' - expr: etcd_server_has_leader{job=~".*etcd.*"} == 0 - for: 1m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdHighNumberOfLeaderChanges - annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": instance {{`{{`}} $labels.instance {{`}}`}} has seen {{`{{`}} $value {{`}}`}} leader changes within the last hour.' - expr: rate(etcd_server_leader_changes_seen_total{job=~".*etcd.*"}[15m]) > 3 - for: 15m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdHighNumberOfFailedGRPCRequests - annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": {{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.grpc_method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' 
- expr: |- - 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code!="OK"}[5m])) BY (job, instance, grpc_service, grpc_method) - / - sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) BY (job, instance, grpc_service, grpc_method) - > 1 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdHighNumberOfFailedGRPCRequests - annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": {{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.grpc_method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' - expr: |- - 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code!="OK"}[5m])) BY (job, instance, grpc_service, grpc_method) - / - sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) BY (job, instance, grpc_service, grpc_method) - > 5 - for: 5m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdGRPCRequestsSlow - annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": gRPC requests to {{`{{`}} $labels.grpc_method {{`}}`}} are taking {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' - expr: |- - histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_type="unary"}[5m])) by (job, instance, grpc_service, grpc_method, le)) - > 0.15 - for: 10m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdMemberCommunicationSlow - annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": member communication with {{`{{`}} $labels.To {{`}}`}} is taking {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' 
- expr: |- - histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m])) - > 0.15 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdHighNumberOfFailedProposals - annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": {{`{{`}} $value {{`}}`}} proposal failures within the last hour on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' - expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5 - for: 15m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdHighFsyncDurations - annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": 99th percentile fync durations are {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' - expr: |- - histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m])) - > 0.5 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdHighCommitDurations - annotations: - message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": 99th percentile commit durations {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' 
- expr: |- - histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket{job=~".*etcd.*"}[5m])) - > 0.25 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdHighNumberOfFailedHTTPRequests - annotations: - message: '{{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}' - expr: |- - sum(rate(etcd_http_failed_total{job=~".*etcd.*", code!="404"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) - BY (method) > 0.01 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdHighNumberOfFailedHTTPRequests - annotations: - message: '{{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' - expr: |- - sum(rate(etcd_http_failed_total{job=~".*etcd.*", code!="404"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) - BY (method) > 0.05 - for: 10m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: etcdHTTPRequestsSlow - annotations: - message: etcd instance {{`{{`}} $labels.instance {{`}}`}} HTTP requests to {{`{{`}} $labels.method {{`}}`}} are slow. 
- expr: |- - histogram_quantile(0.99, rate(etcd_http_successful_duration_seconds_bucket[5m])) - > 0.15 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/general.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/general.rules.yaml deleted file mode 100644 index cde6feb5c97..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/general.rules.yaml +++ /dev/null @@ -1,56 +0,0 @@ -{{- /* -Generated from 'general.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.general }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "general.rules" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: general.rules - rules: - - alert: TargetDown - annotations: - message: '{{`{{`}} $value {{`}}`}}% of the {{`{{`}} $labels.job {{`}}`}} targets are down.' - expr: 100 * (count(up == 0) BY (job) / count(up) BY (job)) > 10 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: Watchdog - annotations: - message: 'This is an alert meant to ensure that the entire alerting pipeline is functional. - - This alert is always firing, therefore it should always be firing in Alertmanager - - and always fire against a receiver. There are integrations with various notification - - mechanisms that send a notification when this alert is not firing. For example the - - "DeadMansSnitch" integration in PagerDuty. - - ' - expr: vector(1) - labels: - severity: none -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/k8s.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/k8s.rules.yaml deleted file mode 100644 index 08aa7fe2b3c..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/k8s.rules.yaml +++ /dev/null @@ -1,83 +0,0 @@ -{{- /* -Generated from 'k8s.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! 
In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.k8s }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "k8s.rules" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: k8s.rules - rules: - - expr: sum(rate(container_cpu_usage_seconds_total{job="kubelet", image!="", container_name!=""}[5m])) by (namespace) - record: namespace:container_cpu_usage_seconds_total:sum_rate - - expr: sum(container_memory_usage_bytes{job="kubelet", image!="", container_name!=""}) by (namespace) - record: namespace:container_memory_usage_bytes:sum - - expr: |- - sum by (namespace, pod_name, container_name) ( - rate(container_cpu_usage_seconds_total{job="kubelet", image!="", container_name!=""}[5m]) - ) - record: namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate - - expr: |- - sum by(namespace) ( - kube_pod_container_resource_requests_memory_bytes{job="kube-state-metrics"} - * on (endpoint, instance, job, namespace, pod, service) - group_left(phase) (kube_pod_status_phase{phase=~"^(Pending|Running)$"} == 1) - ) - record: 
namespace_name:kube_pod_container_resource_requests_memory_bytes:sum - - expr: |- - sum by (namespace) ( - kube_pod_container_resource_requests_cpu_cores{job="kube-state-metrics"} - * on (endpoint, instance, job, namespace, pod, service) - group_left(phase) (kube_pod_status_phase{phase=~"^(Pending|Running)$"} == 1) - ) - record: namespace_name:kube_pod_container_resource_requests_cpu_cores:sum - - expr: |- - sum( - label_replace( - label_replace( - kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"}, - "replicaset", "$1", "owner_name", "(.*)" - ) * on(replicaset, namespace) group_left(owner_name) kube_replicaset_owner{job="kube-state-metrics"}, - "workload", "$1", "owner_name", "(.*)" - ) - ) by (namespace, workload, pod) - labels: - workload_type: deployment - record: mixin_pod_workload - - expr: |- - sum( - label_replace( - kube_pod_owner{job="kube-state-metrics", owner_kind="DaemonSet"}, - "workload", "$1", "owner_name", "(.*)" - ) - ) by (namespace, workload, pod) - labels: - workload_type: daemonset - record: mixin_pod_workload - - expr: |- - sum( - label_replace( - kube_pod_owner{job="kube-state-metrics", owner_kind="StatefulSet"}, - "workload", "$1", "owner_name", "(.*)" - ) - ) by (namespace, workload, pod) - labels: - workload_type: statefulset - record: mixin_pod_workload -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-apiserver.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-apiserver.rules.yaml deleted file mode 100644 index e3a92969239..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-apiserver.rules.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- /* -Generated from 'kube-apiserver.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change 
in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserver }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-apiserver.rules" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: kube-apiserver.rules - rules: - - expr: histogram_quantile(0.99, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.99' - record: cluster_quantile:apiserver_request_latencies:histogram_quantile - - expr: histogram_quantile(0.9, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.9' - record: cluster_quantile:apiserver_request_latencies:histogram_quantile - - expr: histogram_quantile(0.5, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.5' - record: cluster_quantile:apiserver_request_latencies:histogram_quantile -{{- end }} \ No newline at end of file diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml deleted file mode 100644 index a8d5400cb42..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{{- /* -Generated from 'kube-prometheus-node-alerting.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubePrometheusNodeAlerting }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-prometheus-node-alerting.rules" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: kube-prometheus-node-alerting.rules - rules: - - alert: NodeDiskRunningFull - annotations: - message: Device {{`{{`}} $labels.device {{`}}`}} of node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} will be full within the next 24 hours. - expr: '(node:node_filesystem_usage: > 0.85) and (predict_linear(node:node_filesystem_avail:[6h], 3600 * 24) < 0)' - for: 30m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: NodeDiskRunningFull - annotations: - message: Device {{`{{`}} $labels.device {{`}}`}} of node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} will be full within the next 2 hours. 
- expr: '(node:node_filesystem_usage: > 0.85) and (predict_linear(node:node_filesystem_avail:[30m], 3600 * 2) < 0)' - for: 10m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml deleted file mode 100644 index 87f072fd025..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml +++ /dev/null @@ -1,41 +0,0 @@ -{{- /* -Generated from 'kube-prometheus-node-recording.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubePrometheusNodeRecording }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-prometheus-node-recording.rules" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: kube-prometheus-node-recording.rules - rules: - - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[3m])) BY (instance) - record: instance:node_cpu:rate:sum - - expr: sum((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"})) BY (instance) - record: instance:node_filesystem_usage:sum - - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance) - record: instance:node_network_receive_bytes:rate:sum - - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance) - record: instance:node_network_transmit_bytes:rate:sum - - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[5m])) WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) BY (instance, cpu)) BY (instance) - record: instance:node_cpu:ratio - - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[5m])) - record: cluster:node_cpu:sum_rate5m - - expr: cluster:node_cpu_seconds_total:rate5m / count(sum(node_cpu_seconds_total) BY (instance, cpu)) - record: cluster:node_cpu:ratio -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-scheduler.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-scheduler.rules.yaml deleted file mode 100644 index e8de8ed6e3a..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kube-scheduler.rules.yaml +++ /dev/null @@ -1,65 +0,0 @@ -{{- /* -Generated from 'kube-scheduler.rules' group from 
https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubeScheduler }} -{{- if (include "exporter.kubeScheduler.enabled" .)}} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-scheduler.rules" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: kube-scheduler.rules - rules: - - expr: histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.99' - record: cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile - - expr: histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . 
}}"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.99' - record: cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile - - expr: histogram_quantile(0.99, sum(rate(scheduler_binding_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.99' - record: cluster_quantile:scheduler_binding_latency:histogram_quantile - - expr: histogram_quantile(0.9, sum(rate(scheduler_e2e_scheduling_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.9' - record: cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile - - expr: histogram_quantile(0.9, sum(rate(scheduler_scheduling_algorithm_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.9' - record: cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile - - expr: histogram_quantile(0.9, sum(rate(scheduler_binding_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.9' - record: cluster_quantile:scheduler_binding_latency:histogram_quantile - - expr: histogram_quantile(0.5, sum(rate(scheduler_e2e_scheduling_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.5' - record: cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile - - expr: histogram_quantile(0.5, sum(rate(scheduler_scheduling_algorithm_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . 
}}"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.5' - record: cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile - - expr: histogram_quantile(0.5, sum(rate(scheduler_binding_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 - labels: - quantile: '0.5' - record: cluster_quantile:scheduler_binding_latency:histogram_quantile -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-absent.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-absent.yaml deleted file mode 100644 index 85d27cc77e5..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-absent.yaml +++ /dev/null @@ -1,159 +0,0 @@ -{{- /* -Generated from 'kubernetes-absent' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesAbsent }} -{{- $operatorJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "operator" }} -{{- $prometheusJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus" }} -{{- $alertmanagerJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager" }} -{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) 
}} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-absent" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: kubernetes-absent - rules: -{{- if .Values.alertmanager.enabled }} - - alert: AlertmanagerDown - annotations: - message: Alertmanager has disappeared from Prometheus target discovery. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-alertmanagerdown - expr: absent(up{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"} == 1) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} -{{- if .Values.kubeDns.enabled }} - - alert: CoreDNSDown - annotations: - message: CoreDNS has disappeared from Prometheus target discovery. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-corednsdown - expr: absent(up{job="kube-dns"} == 1) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} -{{- if .Values.kubeApiServer.enabled }} - - alert: KubeAPIDown - annotations: - message: KubeAPI has disappeared from Prometheus target discovery. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapidown - expr: absent(up{job="apiserver"} == 1) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} -{{- if (include "exporter.kubeControllerManager.enabled" .)}} - - alert: KubeControllerManagerDown - annotations: - message: KubeControllerManager has disappeared from Prometheus target discovery. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecontrollermanagerdown - expr: absent(up{job="{{ include "exporter.kubeControllerManager.jobName" . }}"} == 1) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} -{{- if (include "exporter.kubeScheduler.enabled" .)}} - - alert: KubeSchedulerDown - annotations: - message: KubeScheduler has disappeared from Prometheus target discovery. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeschedulerdown - expr: absent(up{job="{{ include "exporter.kubeScheduler.jobName" . }}"} == 1) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} -{{- if .Values.kubeStateMetrics.enabled }} - - alert: KubeStateMetricsDown - annotations: - message: KubeStateMetrics has disappeared from Prometheus target discovery. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatemetricsdown - expr: absent(up{job="kube-state-metrics"} == 1) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} -{{- if .Values.prometheusOperator.kubeletService.enabled }} - - alert: KubeletDown - annotations: - message: Kubelet has disappeared from Prometheus target discovery. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletdown - expr: absent(up{job="kubelet"} == 1) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} -{{- if .Values.nodeExporter.enabled }} - - alert: NodeExporterDown - annotations: - message: NodeExporter has disappeared from Prometheus target discovery. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-nodeexporterdown - expr: absent(up{job="node-exporter"} == 1) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} - - alert: PrometheusDown - annotations: - message: Prometheus has disappeared from Prometheus target discovery. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusdown - expr: absent(up{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} == 1) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- if .Values.prometheusOperator.enabled }} - - alert: PrometheusOperatorDown - annotations: - message: PrometheusOperator has disappeared from Prometheus target discovery. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusoperatordown - expr: absent(up{job="{{ $operatorJob }}",namespace="{{ $namespace }}"} == 1) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-apps.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-apps.yaml deleted file mode 100644 index e7a41ca2ab2..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-apps.yaml +++ /dev/null @@ -1,200 +0,0 @@ -{{- /* -Generated from 'kubernetes-apps' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesApps }} -{{- $targetNamespace := .Values.defaultRules.appNamespacesTarget }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-apps" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: kubernetes-apps - rules: - - alert: KubePodCrashLooping - annotations: - message: Pod {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} ({{`{{`}} $labels.container {{`}}`}}) is restarting {{`{{`}} printf "%.2f" $value {{`}}`}} times / 5 minutes. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepodcrashlooping - expr: rate(kube_pod_container_status_restarts_total{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[15m]) * 60 * 5 > 0 - for: 1h - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubePodNotReady - annotations: - message: Pod {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} has been in a non-ready state for longer than an hour. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepodnotready - expr: sum by (namespace, pod) (kube_pod_status_phase{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}", phase=~"Pending|Unknown"}) > 0 - for: 1h - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeDeploymentGenerationMismatch - annotations: - message: Deployment generation for {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.deployment {{`}}`}} does not match, this indicates that the Deployment has failed but has not been rolled back. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedeploymentgenerationmismatch - expr: |- - kube_deployment_status_observed_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - != - kube_deployment_metadata_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeDeploymentReplicasMismatch - annotations: - message: Deployment {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.deployment {{`}}`}} has not matched the expected number of replicas for longer than an hour. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedeploymentreplicasmismatch - expr: |- - kube_deployment_spec_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - != - kube_deployment_status_replicas_available{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - for: 1h - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeStatefulSetReplicasMismatch - annotations: - message: StatefulSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.statefulset {{`}}`}} has not matched the expected number of replicas for longer than 15 minutes. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatefulsetreplicasmismatch - expr: |- - kube_statefulset_status_replicas_ready{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - != - kube_statefulset_status_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeStatefulSetGenerationMismatch - annotations: - message: StatefulSet generation for {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.statefulset {{`}}`}} does not match, this indicates that the StatefulSet has failed but has not been rolled back. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatefulsetgenerationmismatch - expr: |- - kube_statefulset_status_observed_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - != - kube_statefulset_metadata_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeStatefulSetUpdateNotRolledOut - annotations: - message: StatefulSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.statefulset {{`}}`}} update has not been rolled out. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatefulsetupdatenotrolledout - expr: |- - max without (revision) ( - kube_statefulset_status_current_revision{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - unless - kube_statefulset_status_update_revision{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - ) - * - ( - kube_statefulset_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - != - kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - ) - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeDaemonSetRolloutStuck - annotations: - message: Only {{`{{`}} $value {{`}}`}}% of the desired Pods of DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} are scheduled and ready. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedaemonsetrolloutstuck - expr: |- - kube_daemonset_status_number_ready{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - / - kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} * 100 < 100 - for: 15m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeDaemonSetNotScheduled - annotations: - message: '{{`{{`}} $value {{`}}`}} Pods of DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} are not scheduled.' 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedaemonsetnotscheduled - expr: |- - kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - - - kube_daemonset_status_current_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeDaemonSetMisScheduled - annotations: - message: '{{`{{`}} $value {{`}}`}} Pods of DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} are running where they are not supposed to run.' - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedaemonsetmisscheduled - expr: kube_daemonset_status_number_misscheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeCronJobRunning - annotations: - message: CronJob {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.cronjob {{`}}`}} is taking more than 1h to complete. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecronjobrunning - expr: time() - kube_cronjob_next_schedule_time{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 3600 - for: 1h - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeJobCompletion - annotations: - message: Job {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.job_name {{`}}`}} is taking more than one hour to complete. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubejobcompletion - expr: kube_job_spec_completions{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - kube_job_status_succeeded{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 - for: 1h - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeJobFailed - annotations: - message: Job {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.job_name {{`}}`}} failed to complete. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubejobfailed - expr: kube_job_status_failed{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 - for: 1h - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-resources.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-resources.yaml deleted file mode 100644 index b34b442f3b0..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-resources.yaml +++ /dev/null @@ -1,121 +0,0 @@ -{{- /* -Generated from 'kubernetes-resources' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! 
In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesResources }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-resources" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: kubernetes-resources - rules: - - alert: KubeCPUOvercommit - annotations: - message: Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecpuovercommit - expr: |- - sum(namespace_name:kube_pod_container_resource_requests_cpu_cores:sum) - / - sum(node:node_num_cpu:sum) - > - (count(node:node_num_cpu:sum)-1) / count(node:node_num_cpu:sum) - for: 5m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeMemOvercommit - annotations: - message: Cluster has overcommitted memory resource requests for Pods and cannot tolerate node failure. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubememovercommit - expr: |- - sum(namespace_name:kube_pod_container_resource_requests_memory_bytes:sum) - / - sum(node_memory_MemTotal_bytes) - > - (count(node:node_num_cpu:sum)-1) - / - count(node:node_num_cpu:sum) - for: 5m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeCPUOvercommit - annotations: - message: Cluster has overcommitted CPU resource requests for Namespaces. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecpuovercommit - expr: |- - sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="cpu"}) - / - sum(node:node_num_cpu:sum) - > 1.5 - for: 5m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeMemOvercommit - annotations: - message: Cluster has overcommitted memory resource requests for Namespaces. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubememovercommit - expr: |- - sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="memory"}) - / - sum(node_memory_MemTotal_bytes{job="node-exporter"}) - > 1.5 - for: 5m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeQuotaExceeded - annotations: - message: Namespace {{`{{`}} $labels.namespace {{`}}`}} is using {{`{{`}} printf "%0.0f" $value {{`}}`}}% of its {{`{{`}} $labels.resource {{`}}`}} quota. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubequotaexceeded - expr: |- - 100 * kube_resourcequota{job="kube-state-metrics", type="used"} - / ignoring(instance, job, type) - (kube_resourcequota{job="kube-state-metrics", type="hard"} > 0) - > 90 - for: 15m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: CPUThrottlingHigh - annotations: - message: '{{`{{`}} printf "%0.0f" $value {{`}}`}}% throttling of CPU in namespace {{`{{`}} $labels.namespace {{`}}`}} for container {{`{{`}} $labels.container_name {{`}}`}} in pod {{`{{`}} $labels.pod_name {{`}}`}}.' - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-cputhrottlinghigh - expr: |- - 100 * sum(increase(container_cpu_cfs_throttled_periods_total{container_name!="", }[5m])) by (container_name, pod_name, namespace) - / - sum(increase(container_cpu_cfs_periods_total{}[5m])) by (container_name, pod_name, namespace) - > 25 - for: 15m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-storage.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-storage.yaml deleted file mode 100644 index 6469fffc52e..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-storage.yaml +++ /dev/null @@ -1,72 +0,0 @@ -{{- /* -Generated from 'kubernetes-storage' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! 
In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesStorage }} -{{- $targetNamespace := .Values.defaultRules.appNamespacesTarget }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-storage" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: kubernetes-storage - rules: - - alert: KubePersistentVolumeUsageCritical - annotations: - message: The PersistentVolume claimed by {{`{{`}} $labels.persistentvolumeclaim {{`}}`}} in Namespace {{`{{`}} $labels.namespace {{`}}`}} is only {{`{{`}} printf "%0.2f" $value {{`}}`}}% free. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepersistentvolumeusagecritical - expr: |- - 100 * kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}"} - / - kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}"} - < 3 - for: 1m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubePersistentVolumeFullInFourDays - annotations: - message: Based on recent sampling, the PersistentVolume claimed by {{`{{`}} $labels.persistentvolumeclaim {{`}}`}} in Namespace {{`{{`}} $labels.namespace {{`}}`}} is expected to fill up within four days. Currently {{`{{`}} printf "%0.2f" $value {{`}}`}}% is available. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepersistentvolumefullinfourdays - expr: |- - 100 * ( - kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}"} - / - kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}"} - ) < 15 - and - predict_linear(kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}"}[6h], 4 * 24 * 3600) < 0 - for: 5m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubePersistentVolumeErrors - annotations: - message: The persistent volume {{`{{`}} $labels.persistentvolume {{`}}`}} has status {{`{{`}} $labels.phase {{`}}`}}. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepersistentvolumeerrors - expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0 - for: 5m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-system.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-system.yaml deleted file mode 100644 index da232057beb..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/kubernetes-system.yaml +++ /dev/null @@ -1,184 +0,0 @@ -{{- /* -Generated from 'kubernetes-system' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesSystem }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-system" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: kubernetes-system - rules: - - alert: KubeNodeNotReady - annotations: - message: '{{`{{`}} $labels.node {{`}}`}} has been unready for more than an hour.' - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubenodenotready - expr: kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0 - for: 1h - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeVersionMismatch - annotations: - message: There are {{`{{`}} $value {{`}}`}} different semantic versions of Kubernetes components running. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeversionmismatch - expr: count(count by (gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*.[0-9]*).*"))) > 1 - for: 1h - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeClientErrors - annotations: - message: Kubernetes API server client '{{`{{`}} $labels.job {{`}}`}}/{{`{{`}} $labels.instance {{`}}`}}' is experiencing {{`{{`}} printf "%0.0f" $value {{`}}`}}% errors.' 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclienterrors - expr: |- - (sum(rate(rest_client_requests_total{code=~"5.."}[5m])) by (instance, job) - / - sum(rate(rest_client_requests_total[5m])) by (instance, job)) - * 100 > 1 - for: 15m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeClientErrors - annotations: - message: Kubernetes API server client '{{`{{`}} $labels.job {{`}}`}}/{{`{{`}} $labels.instance {{`}}`}}' is experiencing {{`{{`}} printf "%0.0f" $value {{`}}`}} errors / second. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclienterrors - expr: sum(rate(ksm_scrape_error_total{job="kube-state-metrics"}[5m])) by (instance, job) > 0.1 - for: 15m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeletTooManyPods - annotations: - message: Kubelet {{`{{`}} $labels.instance {{`}}`}} is running {{`{{`}} $value {{`}}`}} Pods, close to the limit of 110. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubelettoomanypods - expr: kubelet_running_pod_count{job="kubelet"} > 110 * 0.9 - for: 15m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeAPILatencyHigh - annotations: - message: The API server has a 99th percentile latency of {{`{{`}} $value {{`}}`}} seconds for {{`{{`}} $labels.verb {{`}}`}} {{`{{`}} $labels.resource {{`}}`}}. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapilatencyhigh - expr: cluster_quantile:apiserver_request_latencies:histogram_quantile{job="apiserver",quantile="0.99",subresource!="log",verb!~"^(?:LIST|WATCH|WATCHLIST|PROXY|CONNECT)$"} > 1 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeAPILatencyHigh - annotations: - message: The API server has a 99th percentile latency of {{`{{`}} $value {{`}}`}} seconds for {{`{{`}} $labels.verb {{`}}`}} {{`{{`}} $labels.resource {{`}}`}}. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapilatencyhigh - expr: cluster_quantile:apiserver_request_latencies:histogram_quantile{job="apiserver",quantile="0.99",subresource!="log",verb!~"^(?:LIST|WATCH|WATCHLIST|PROXY|CONNECT)$"} > 4 - for: 10m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeAPIErrorsHigh - annotations: - message: API server is returning errors for {{`{{`}} $value {{`}}`}}% of requests. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorshigh - expr: |- - sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[5m])) - / - sum(rate(apiserver_request_count{job="apiserver"}[5m])) * 100 > 3 - for: 10m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeAPIErrorsHigh - annotations: - message: API server is returning errors for {{`{{`}} $value {{`}}`}}% of requests. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorshigh - expr: |- - sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[5m])) - / - sum(rate(apiserver_request_count{job="apiserver"}[5m])) * 100 > 1 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeAPIErrorsHigh - annotations: - message: API server is returning errors for {{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.verb {{`}}`}} {{`{{`}} $labels.resource {{`}}`}} {{`{{`}} $labels.subresource {{`}}`}}. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorshigh - expr: |- - sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[5m])) by (resource,subresource,verb) - / - sum(rate(apiserver_request_count{job="apiserver"}[5m])) by (resource,subresource,verb) * 100 > 10 - for: 10m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeAPIErrorsHigh - annotations: - message: API server is returning errors for {{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.verb {{`}}`}} {{`{{`}} $labels.resource {{`}}`}} {{`{{`}} $labels.subresource {{`}}`}}. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorshigh - expr: |- - sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[5m])) by (resource,subresource,verb) - / - sum(rate(apiserver_request_count{job="apiserver"}[5m])) by (resource,subresource,verb) * 100 > 5 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeClientCertificateExpiration - annotations: - message: A client certificate used to authenticate to the apiserver is expiring in less than 7.0 days. - runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclientcertificateexpiration - expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800 - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: KubeClientCertificateExpiration - annotations: - message: A client certificate used to authenticate to the apiserver is expiring in less than 24.0 hours. 
- runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclientcertificateexpiration - expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400 - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node-network.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node-network.yaml deleted file mode 100644 index c75f1ae074f..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node-network.yaml +++ /dev/null @@ -1,57 +0,0 @@ -{{- /* -Generated from 'node-network' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.network }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "node-network" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: node-network - rules: - - alert: NetworkReceiveErrors - annotations: - message: Network interface "{{`{{`}} $labels.device {{`}}`}}" showing receive errors on node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}}" - expr: rate(node_network_receive_errs_total{job="node-exporter",device!~"veth.+"}[2m]) > 0 - for: 2m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: NetworkTransmitErrors - annotations: - message: Network interface "{{`{{`}} $labels.device {{`}}`}}" showing transmit errors on node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}}" - expr: rate(node_network_transmit_errs_total{job="node-exporter",device!~"veth.+"}[2m]) > 0 - for: 2m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: NodeNetworkInterfaceFlapping - annotations: - message: Network interface "{{`{{`}} $labels.device {{`}}`}}" changing it's up status often on node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}}" - expr: changes(node_network_up{job="node-exporter",device!~"veth.+"}[2m]) > 2 - for: 2m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node-time.yaml 
b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node-time.yaml deleted file mode 100644 index b7a2fc92fd3..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node-time.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{{- /* -Generated from 'node-time' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.time }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "node-time" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: node-time - rules: - - alert: ClockSkewDetected - annotations: - message: Clock skew detected on node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}}. Ensure NTP is configured correctly on this host. 
- expr: abs(node_timex_offset_seconds{job="node-exporter"}) > 0.03 - for: 2m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node.rules.yaml deleted file mode 100644 index 2bc7af3a97f..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/node.rules.yaml +++ /dev/null @@ -1,202 +0,0 @@ -{{- /* -Generated from 'node.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.node }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "node.rules" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: node.rules - rules: - - expr: sum(min(kube_pod_info) by (node)) - record: ':kube_pod_info_node_count:' - - expr: max(label_replace(kube_pod_info{job="kube-state-metrics"}, "pod", "$1", "pod", "(.*)")) by (node, namespace, pod) - record: 'node_namespace_pod:kube_pod_info:' - - expr: |- - count by (node) (sum by (node, cpu) ( - node_cpu_seconds_total{job="node-exporter"} - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - )) - record: node:node_num_cpu:sum - - expr: 1 - avg(rate(node_cpu_seconds_total{job="node-exporter",mode="idle"}[1m])) - record: :node_cpu_utilisation:avg1m - - expr: |- - 1 - avg by (node) ( - rate(node_cpu_seconds_total{job="node-exporter",mode="idle"}[1m]) - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info:) - record: node:node_cpu_utilisation:avg1m - - expr: |- - node:node_cpu_utilisation:avg1m - * - node:node_num_cpu:sum - / - scalar(sum(node:node_num_cpu:sum)) - record: node:cluster_cpu_utilisation:ratio - - expr: |- - sum(node_load1{job="node-exporter"}) - / - sum(node:node_num_cpu:sum) - record: ':node_cpu_saturation_load1:' - - expr: |- - sum by (node) ( - node_load1{job="node-exporter"} - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - ) - / - node:node_num_cpu:sum - record: 'node:node_cpu_saturation_load1:' - - expr: |- - 1 - - sum(node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"}) - / - sum(node_memory_MemTotal_bytes{job="node-exporter"}) - record: ':node_memory_utilisation:' - - expr: sum(node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + 
node_memory_Buffers_bytes{job="node-exporter"}) - record: :node_memory_MemFreeCachedBuffers_bytes:sum - - expr: sum(node_memory_MemTotal_bytes{job="node-exporter"}) - record: :node_memory_MemTotal_bytes:sum - - expr: |- - sum by (node) ( - (node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"}) - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - ) - record: node:node_memory_bytes_available:sum - - expr: |- - sum by (node) ( - node_memory_MemTotal_bytes{job="node-exporter"} - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - ) - record: node:node_memory_bytes_total:sum - - expr: |- - (node:node_memory_bytes_total:sum - node:node_memory_bytes_available:sum) - / - node:node_memory_bytes_total:sum - record: node:node_memory_utilisation:ratio - - expr: |- - (node:node_memory_bytes_total:sum - node:node_memory_bytes_available:sum) - / - scalar(sum(node:node_memory_bytes_total:sum)) - record: node:cluster_memory_utilisation:ratio - - expr: |- - 1e3 * sum( - (rate(node_vmstat_pgpgin{job="node-exporter"}[1m]) - + rate(node_vmstat_pgpgout{job="node-exporter"}[1m])) - ) - record: :node_memory_swap_io_bytes:sum_rate - - expr: |- - 1 - - sum by (node) ( - (node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"}) - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - ) - / - sum by (node) ( - node_memory_MemTotal_bytes{job="node-exporter"} - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - ) - record: 'node:node_memory_utilisation:' - - expr: 1 - (node:node_memory_bytes_available:sum / node:node_memory_bytes_total:sum) - record: 'node:node_memory_utilisation_2:' - - expr: |- - 1e3 * sum by (node) ( - (rate(node_vmstat_pgpgin{job="node-exporter"}[1m]) - + 
rate(node_vmstat_pgpgout{job="node-exporter"}[1m])) - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - ) - record: node:node_memory_swap_io_bytes:sum_rate - - expr: avg(irate(node_disk_io_time_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+"}[1m])) - record: :node_disk_utilisation:avg_irate - - expr: |- - avg by (node) ( - irate(node_disk_io_time_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+"}[1m]) - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - ) - record: node:node_disk_utilisation:avg_irate - - expr: avg(irate(node_disk_io_time_weighted_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+"}[1m])) - record: :node_disk_saturation:avg_irate - - expr: |- - avg by (node) ( - irate(node_disk_io_time_weighted_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+"}[1m]) - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - ) - record: node:node_disk_saturation:avg_irate - - expr: |- - max by (instance, namespace, pod, device) ((node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"} - - node_filesystem_avail_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"}) - / node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"}) - record: 'node:node_filesystem_usage:' - - expr: max by (instance, namespace, pod, device) (node_filesystem_avail_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"} / node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"}) - record: 'node:node_filesystem_avail:' - - expr: |- - sum(irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[1m])) + - sum(irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[1m])) - record: :node_net_utilisation:sum_irate - - expr: |- - sum by (node) ( - (irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[1m]) + - 
irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[1m])) - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - ) - record: node:node_net_utilisation:sum_irate - - expr: |- - sum(irate(node_network_receive_drop_total{job="node-exporter",device!~"veth.+"}[1m])) + - sum(irate(node_network_transmit_drop_total{job="node-exporter",device!~"veth.+"}[1m])) - record: :node_net_saturation:sum_irate - - expr: |- - sum by (node) ( - (irate(node_network_receive_drop_total{job="node-exporter",device!~"veth.+"}[1m]) + - irate(node_network_transmit_drop_total{job="node-exporter",device!~"veth.+"}[1m])) - * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: - ) - record: node:node_net_saturation:sum_irate - - expr: |- - max( - max( - kube_pod_info{job="kube-state-metrics", host_ip!=""} - ) by (node, host_ip) - * on (host_ip) group_right (node) - label_replace( - (max(node_filesystem_files{job="node-exporter", mountpoint="/"}) by (instance)), "host_ip", "$1", "instance", "(.*):.*" - ) - ) by (node) - record: 'node:node_inodes_total:' - - expr: |- - max( - max( - kube_pod_info{job="kube-state-metrics", host_ip!=""} - ) by (node, host_ip) - * on (host_ip) group_right (node) - label_replace( - (max(node_filesystem_files_free{job="node-exporter", mountpoint="/"}) by (instance)), "host_ip", "$1", "instance", "(.*):.*" - ) - ) by (node) - record: 'node:node_inodes_free:' -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/prometheus-operator.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/prometheus-operator.yaml deleted file mode 100644 index a8a8915b621..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/prometheus-operator.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- /* -Generated from 'prometheus-operator' group from 
https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.prometheusOperator }} -{{- $operatorJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "operator" }} -{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus-operator" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: prometheus-operator - rules: - - alert: PrometheusOperatorReconcileErrors - annotations: - message: Errors while reconciling {{`{{`}} $labels.controller {{`}}`}} in {{`{{`}} $labels.namespace {{`}}`}} Namespace. 
- expr: rate(prometheus_operator_reconcile_errors_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0.1 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: PrometheusOperatorNodeLookupErrors - annotations: - message: Errors while reconciling Prometheus in {{`{{`}} $labels.namespace {{`}}`}} Namespace. - expr: rate(prometheus_operator_node_address_lookup_errors_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0.1 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/prometheus.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/prometheus.rules.yaml deleted file mode 100644 index 0480c83b56d..00000000000 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules/prometheus.rules.yaml +++ /dev/null @@ -1,139 +0,0 @@ -{{- /* -Generated from 'prometheus.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml -Do not change in-place! In order to change this file first read following link: -https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack -*/ -}} -{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.prometheus }} -{{- $prometheusJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) 
"prometheus" }} -{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus.rules" | trunc 63 | trimSuffix "-" }} - namespace: {{ template "kube-prometheus-stack.namespace" . }} - labels: - app: {{ template "kube-prometheus-stack.name" . }} -{{ include "kube-prometheus-stack.labels" . | indent 4 }} -{{- if .Values.defaultRules.labels }} -{{ toYaml .Values.defaultRules.labels | indent 4 }} -{{- end }} -{{- if .Values.defaultRules.annotations }} - annotations: -{{ toYaml .Values.defaultRules.annotations | indent 4 }} -{{- end }} -spec: - groups: - - name: prometheus.rules - rules: - - alert: PrometheusConfigReloadFailed - annotations: - description: Reloading Prometheus' configuration has failed for {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} - summary: Reloading Prometheus' configuration failed - expr: prometheus_config_last_reload_successful{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} == 0 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: PrometheusNotificationQueueRunningFull - annotations: - description: Prometheus' alert notification queue is running full for {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}} $labels.pod{{`}}`}} - summary: Prometheus' alert notification queue is running full - expr: predict_linear(prometheus_notifications_queue_length{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m], 60 * 30) > prometheus_notifications_queue_capacity{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: PrometheusErrorSendingAlerts - 
annotations: - description: Errors while sending alerts from Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}} $labels.pod{{`}}`}} to Alertmanager {{`{{`}}$labels.Alertmanager{{`}}`}} - summary: Errors while sending alert from Prometheus - expr: rate(prometheus_notifications_errors_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) / rate(prometheus_notifications_sent_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0.01 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: PrometheusErrorSendingAlerts - annotations: - description: Errors while sending alerts from Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}} $labels.pod{{`}}`}} to Alertmanager {{`{{`}}$labels.Alertmanager{{`}}`}} - summary: Errors while sending alerts from Prometheus - expr: rate(prometheus_notifications_errors_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) / rate(prometheus_notifications_sent_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0.03 - for: 10m - labels: - severity: critical -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: PrometheusNotConnectedToAlertmanagers - annotations: - description: Prometheus {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod{{`}}`}} is not connected to any Alertmanagers - summary: Prometheus is not connected to any Alertmanagers - expr: prometheus_notifications_alertmanagers_discovered{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} < 1 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: PrometheusTSDBReloadsFailing - annotations: - description: '{{`{{`}}$labels.job{{`}}`}} at 
{{`{{`}}$labels.instance{{`}}`}} had {{`{{`}}$value | humanize{{`}}`}} reload failures over the last four hours.' - summary: Prometheus has issues reloading data blocks from disk - expr: increase(prometheus_tsdb_reloads_failures_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[2h]) > 0 - for: 12h - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: PrometheusTSDBCompactionsFailing - annotations: - description: '{{`{{`}}$labels.job{{`}}`}} at {{`{{`}}$labels.instance{{`}}`}} had {{`{{`}}$value | humanize{{`}}`}} compaction failures over the last four hours.' - summary: Prometheus has issues compacting sample blocks - expr: increase(prometheus_tsdb_compactions_failed_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[2h]) > 0 - for: 12h - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: PrometheusTSDBWALCorruptions - annotations: - description: '{{`{{`}}$labels.job{{`}}`}} at {{`{{`}}$labels.instance{{`}}`}} has a corrupted write-ahead log (WAL).' - summary: Prometheus write-ahead log is corrupted - expr: prometheus_tsdb_wal_corruptions_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} > 0 - for: 4h - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: PrometheusNotIngestingSamples - annotations: - description: Prometheus {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod{{`}}`}} isn't ingesting samples. 
- summary: Prometheus isn't ingesting samples - expr: rate(prometheus_tsdb_head_samples_appended_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) <= 0 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} - - alert: PrometheusTargetScrapesDuplicate - annotations: - description: '{{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} has many samples rejected due to duplicate timestamps but different values' - summary: Prometheus has many samples rejected - expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0 - for: 10m - labels: - severity: warning -{{- if .Values.defaultRules.additionalRuleLabels }} -{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/CHANGELOG.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/CHANGELOG.md similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/CHANGELOG.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/CHANGELOG.md diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/CONTRIBUTING.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/CONTRIBUTING.md similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/CONTRIBUTING.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/CONTRIBUTING.md diff 
--git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/Chart.yaml similarity index 88% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/Chart.yaml index b8c33e5f413..b156cc0ca12 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/Chart.yaml @@ -15,11 +15,17 @@ annotations: catalog.cattle.io/requests-memory: 4000Mi catalog.cattle.io/ui-component: monitoring apiVersion: v2 -appVersion: 0.46.0 +appVersion: 0.48.0 dependencies: - condition: grafana.enabled name: grafana repository: file://./charts/grafana +- condition: hardenedKubelet.enabled + name: hardenedKubelet + repository: file://./charts/hardenedKubelet +- condition: hardenedNodeExporter.enabled + name: hardenedNodeExporter + repository: file://./charts/hardenedNodeExporter - condition: k3sServer.enabled name: k3sServer repository: file://./charts/k3sServer @@ -50,6 +56,9 @@ dependencies: - condition: rke2Etcd.enabled name: rke2Etcd repository: file://./charts/rke2Etcd +- condition: rke2IngressNginx.enabled + name: rke2IngressNginx + repository: file://./charts/rke2IngressNginx - condition: rke2Proxy.enabled name: rke2Proxy repository: file://./charts/rke2Proxy @@ -62,6 +71,9 @@ dependencies: - condition: rkeEtcd.enabled name: rkeEtcd repository: file://./charts/rkeEtcd +- condition: rkeIngressNginx.enabled + name: rkeIngressNginx + repository: file://./charts/rkeIngressNginx - condition: rkeProxy.enabled name: rkeProxy repository: file://./charts/rkeProxy @@ -100,4 +112,4 @@ sources: - https://github.com/prometheus-community/helm-charts - https://github.com/prometheus-operator/kube-prometheus type: application -version: 14.5.101+up14.5.0 +version: 16.6.0 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/README.md 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/README.md similarity index 91% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/README.md index aa5d530f29c..1efa0da5dcb 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/README.md +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/README.md @@ -35,7 +35,7 @@ _See [helm install](https://helm.sh/docs/helm/helm_install/) for command documen By default this chart installs additional, dependent charts: -- [kubernetes/kube-state-metrics](https://github.com/kubernetes/kube-state-metrics/tree/master/charts/kube-state-metrics) +- [prometheus-community/kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics) - [prometheus-community/prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter) - [grafana/grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana) @@ -83,6 +83,22 @@ _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documen A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. +### From 15.x to 16.x +Version 16 upgrades kube-state-metrics to v2.0.0. This includes changed command-line arguments and removed metrics, see this [blog post](https://kubernetes.io/blog/2021/04/13/kube-state-metrics-v-2-0/). This version also removes Grafana dashboards that supported Kubernetes 1.14 or earlier. + +### From 14.x to 15.x +Version 15 upgrades prometheus-operator from 0.46.x to 0.47.x. 
Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.47.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + ### From 13.x to 14.x Version 14 upgrades prometheus-operator from 0.45.x to 0.46.x. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: @@ -109,6 +125,12 @@ kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheu ### From 11.x to 12.x +Version 12 upgrades prometheus-operator from 0.43.x to 0.44.x. 
Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.44/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +``` + The chart was migrated to support only helm v3 and later. ### From 10.x to 11.x @@ -199,7 +221,9 @@ The following values are enabled for different distributions via [rancher-pushpr | `rkeControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in RKE clusters | `false` | | `rkeScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in RKE clusters | `false` | | `rkeProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in RKE clusters | `false` | +| `rkeIngressNginx.enabled` | Create a PushProx installation for monitoring ingress-nginx metrics in RKE clusters | `false` | | `rkeEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in RKE clusters | `false` | +| `rke2IngressNginx.enabled` | Create a PushProx installation for monitoring ingress-nginx metrics in RKE2 clusters | `false` | | `k3sServer.enabled` | Create a PushProx installation for monitoring k3s-server metrics (accounts for kube-controller-manager, kube-scheduler, and kube-proxy metrics) in k3s clusters | `false` | | `kubeAdmControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in kubeAdm clusters | `false` | | `kubeAdmScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in kubeAdm clusters | `false` | @@ -407,7 +431,7 @@ apiVersion: v1 kind: PersistentVolumeClaim metadata: labels: - app: prometheus + app.kubernetes.io/name: prometheus prometheus: prometheus-migration-prometheus name: 
prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0 namespace: monitoring diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/app-README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/app-README.md similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/app-README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/app-README.md diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/Chart.yaml similarity index 95% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/Chart.yaml index 6f950a023d9..77df89c4885 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/Chart.yaml @@ -5,7 +5,7 @@ annotations: catalog.rancher.io/namespace: cattle-monitoring-system catalog.rancher.io/release-name: rancher-grafana apiVersion: v2 -appVersion: 7.4.5 +appVersion: 7.5.8 description: The leading tool for querying and visualizing time series and metrics. 
home: https://grafana.net icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png @@ -25,4 +25,4 @@ name: grafana sources: - https://github.com/grafana/grafana type: application -version: 6.6.4 +version: 6.11.0 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/README.md similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/README.md index 957f019ecf7..3d1d73e481a 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/README.md +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/README.md @@ -63,6 +63,7 @@ This version requires Helm >= 3.1.0. | `image.sha` | Image sha (optional) | `2b56f6106ddc376bb46d974230d530754bf65a640dfbc5245191d72d3b49efc6` | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | | `image.pullSecrets` | Image pull secrets | `{}` | +| `service.enabled` | Enable grafana service | `true` | | `service.type` | Kubernetes service type | `ClusterIP` | | `service.port` | Kubernetes port where service is exposed | `80` | | `service.portName` | Name of the port on the service | `service` | @@ -82,7 +83,7 @@ This version requires Helm >= 3.1.0. | `ingress.path` | Ingress accepted path | `/` | | `ingress.pathType` | Ingress type of path | `Prefix` | | `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | -| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). | `[]` | +| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. 
Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` | | `ingress.tls` | Ingress TLS configuration | `[]` | | `resources` | CPU/Memory resource requests/limits | `{}` | | `nodeSelector` | Node labels for pod assignment | `{}` | @@ -157,13 +158,16 @@ This version requires Helm >= 3.1.0. | `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | | `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | | `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | | `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | | `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | -| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` | +| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` | | `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | | `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | | `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | | `sidecar.notifiers.searchNamespace` | If specified, the sidecar will search for notifiers config-maps (or secrets) inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | | `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | | `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | | `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | @@ -215,6 +219,7 @@ This version requires Helm >= 3.1.0. | `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | | `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | | `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | +| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` | | `imageRenderer.service.portName` | image-renderer service port name | `'http'` | | `imageRenderer.service.port` | image-renderer service port used by both service and deployment | `8081` | | `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | @@ -242,6 +247,9 @@ ingress: ### Example of extraVolumeMounts +Volume can be type persistentVolumeClaim or hostPath but not both at same time. 
+If none existingClaim or hostPath argument is givent then type is emptyDir. + ```yaml - extraVolumeMounts: - name: plugins @@ -249,6 +257,10 @@ ingress: subPath: configs/grafana/plugins existingClaim: existing-grafana-claim readOnly: false + - name: dashboards + mountPath: /var/lib/grafana/dashboards + hostPath: /usr/shared/grafana/dashboards + readOnly: false ``` ## Import dashboards diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/dashboards/custom-dashboard.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/dashboards/custom-dashboard.json similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/dashboards/custom-dashboard.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/dashboards/custom-dashboard.json diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/NOTES.txt b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/NOTES.txt similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/NOTES.txt rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/NOTES.txt diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/_helpers.tpl similarity index 90% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/_helpers.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/_helpers.tpl index 76ad78876f5..03da0ff33c6 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/_helpers.tpl +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/_helpers.tpl @@ -143,3 +143,16 @@ Return the appropriate apiVersion for rbac. 
{{- print "rbac.authorization.k8s.io/v1beta1" -}} {{- end -}} {{- end -}} + +{{/* +Looks if there's an existing secret and reuse its password. If not it generates +new password and use it. +*/}} +{{- define "grafana.password" -}} +{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) -}} + {{- if $secret -}} + {{- index $secret "data" "admin-password" -}} + {{- else -}} + {{- (randAlphaNum 40) | b64enc | quote -}} + {{- end -}} +{{- end -}} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/_pod.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/_pod.tpl similarity index 95% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/_pod.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/_pod.tpl index 2ba9f115ca2..9d19b4a3223 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/_pod.tpl +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/_pod.tpl @@ -100,7 +100,7 @@ initContainers: - name: FOLDER value: "/etc/grafana/provisioning/datasources" - name: RESOURCE - value: "both" + value: {{ quote .Values.sidecar.datasources.resource }} {{- if .Values.sidecar.enableUniqueFilenames }} - name: UNIQUE_FILENAMES value: "{{ .Values.sidecar.enableUniqueFilenames }}" @@ -135,7 +135,7 @@ initContainers: - name: FOLDER value: "/etc/grafana/provisioning/notifiers" - name: RESOURCE - value: "both" + value: {{ quote .Values.sidecar.notifiers.resource }} {{- if .Values.sidecar.enableUniqueFilenames }} - name: UNIQUE_FILENAMES value: "{{ .Values.sidecar.enableUniqueFilenames }}" @@ -184,7 +184,7 @@ containers: - name: FOLDER value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }}" - name: RESOURCE - value: "both" + value: {{ quote .Values.sidecar.dashboards.resource }} {{- if .Values.sidecar.enableUniqueFilenames }} - name: UNIQUE_FILENAMES value: "{{ .Values.sidecar.enableUniqueFilenames }}" @@ -317,14 +317,14 @@ containers: containerPort: 3000 protocol: TCP env: - {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - name: GF_SECURITY_ADMIN_USER valueFrom: secretKeyRef: name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} key: {{ .Values.admin.userKey | default "admin-user" }} {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - name: GF_SECURITY_ADMIN_PASSWORD valueFrom: secretKeyRef: @@ -356,6 +356,14 @@ containers: - name: GF_RENDERING_CALLBACK_URL value: http://{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} {{ end }} + - name: GF_PATHS_DATA + value: {{ (get .Values "grafana.ini").paths.data }} + - name: GF_PATHS_LOGS + value: {{ (get .Values "grafana.ini").paths.logs }} + - name: GF_PATHS_PLUGINS + value: {{ (get .Values "grafana.ini").paths.plugins }} + - name: GF_PATHS_PROVISIONING + value: {{ (get .Values "grafana.ini").paths.provisioning }} {{- range $key, $value := .Values.envValueFrom }} - name: {{ $key | quote }} valueFrom: @@ -483,8 +491,15 @@ volumes: {{- end }} {{- range .Values.extraVolumeMounts }} - name: {{ .name }} + {{- if .existingClaim }} persistentVolumeClaim: claimName: {{ .existingClaim }} + {{- else if .hostPath }} + hostPath: + path: {{ .hostPath }} + {{- else }} + emptyDir: {} + {{- end }} {{- end }} {{- range .Values.extraEmptyDirMounts }} - name: {{ .name }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/clusterrole.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/clusterrole.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/clusterrole.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/clusterrolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/clusterrolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/clusterrolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/configmap-dashboard-provider.yaml 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/configmap-dashboard-provider.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/configmap-dashboard-provider.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/configmap-dashboard-provider.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/configmap.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/configmap.yaml similarity index 95% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/configmap.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/configmap.yaml index de32b7ab2d2..c72219fb807 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/configmap.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/configmap.yaml @@ -19,8 +19,10 @@ data: {{- range $elem, $elemVal := $value }} {{- if kindIs "invalid" $elemVal }} {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} {{- else }} - {{ $elem }} = {{ tpl (toYaml $elemVal) $ }} + {{ $elem }} = {{ $elemVal }} {{- end }} {{- end }} {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/dashboards-json-configmap.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/dashboards-json-configmap.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/dashboards-json-configmap.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/dashboards-json-configmap.yaml diff --git 
a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/deployment.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/deployment.yaml similarity index 83% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/deployment.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/deployment.yaml index 4d77794cd9b..1c9ae863816 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/deployment.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/deployment.yaml @@ -14,7 +14,9 @@ metadata: {{ toYaml . | indent 4 }} {{- end }} spec: + {{- if not .Values.autoscaling.enabled }} replicas: {{ .Values.replicas }} + {{- end }} revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} selector: matchLabels: @@ -34,7 +36,7 @@ spec: checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} -{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }} {{- end }} {{- if .Values.envRenderSecret }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/headless-service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/headless-service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/headless-service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/headless-service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/hpa.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/hpa.yaml new file mode 100644 index 00000000000..9c186d74ac1 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/hpa.yaml @@ -0,0 +1,20 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "grafana.name" . }} + helm.sh/chart: {{ template "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "grafana.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: +{{ toYaml .Values.autoscaling.metrics | indent 4 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/image-renderer-deployment.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/image-renderer-deployment.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/image-renderer-deployment.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/image-renderer-deployment.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/image-renderer-network-policy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/image-renderer-network-policy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/image-renderer-network-policy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/image-renderer-network-policy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/image-renderer-service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/image-renderer-service.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/image-renderer-service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/image-renderer-service.yaml index f5d3eb02f91..f29586c3ac2 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/image-renderer-service.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/image-renderer-service.yaml 
@@ -1,4 +1,5 @@ {{ if .Values.imageRenderer.enabled }} +{{ if .Values.imageRenderer.service.enabled }} apiVersion: v1 kind: Service metadata: @@ -26,3 +27,4 @@ spec: selector: {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} {{ end }} +{{ end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/ingress.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/ingress.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/ingress.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/ingress.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/nginx-config.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/nginx-config.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/nginx-config.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/nginx-config.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/poddisruptionbudget.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/poddisruptionbudget.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/poddisruptionbudget.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/poddisruptionbudget.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/podsecuritypolicy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/podsecuritypolicy.yaml similarity index 71% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/podsecuritypolicy.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/podsecuritypolicy.yaml index 19da5079173..f7c5941ab68 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/podsecuritypolicy.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/podsecuritypolicy.yaml @@ -13,19 +13,8 @@ spec: privileged: false allowPrivilegeEscalation: false requiredDropCapabilities: - # Default set from Docker, without DAC_OVERRIDE or CHOWN - - FOWNER - - FSETID - - KILL - - SETGID - - SETUID - - SETPCAP - - NET_BIND_SERVICE - - NET_RAW - - SYS_CHROOT - - MKNOD - - AUDIT_WRITE - - SETFCAP + # Default set from Docker, with DAC_OVERRIDE and CHOWN + - ALL volumes: - 'configMap' - 'emptyDir' @@ -38,12 +27,20 @@ spec: hostIPC: false hostPID: false runAsUser: - rule: 'RunAsAny' + rule: 'MustRunAsNonRoot' seLinux: rule: 'RunAsAny' supplementalGroups: - rule: 'RunAsAny' + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 fsGroup: - rule: 'RunAsAny' + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 readOnlyRootFilesystem: false {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/pvc.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/pvc.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/pvc.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/pvc.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/role.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/role.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/rolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/rolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/rolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/secret-env.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/secret-env.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/secret-env.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/secret-env.yaml diff --git a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/secret.yaml 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/secret.yaml similarity index 62% rename from charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/secret.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/secret.yaml index 4fdd817dae5..6d06cf584f4 100644 --- a/charts/rancher-grafana/rancher-grafana/6.6.402+up6.6.4/templates/secret.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/secret.yaml @@ -1,4 +1,4 @@ -{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} apiVersion: v1 kind: Secret metadata: @@ -6,6 +6,10 @@ metadata: namespace: {{ template "grafana.namespace" . }} labels: {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} type: Opaque data: {{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} @@ -13,7 +17,7 @@ data: {{- if .Values.adminPassword }} admin-password: {{ .Values.adminPassword | b64enc | quote }} {{- else }} - admin-password: {{ randAlphaNum 40 | b64enc | quote }} + admin-password: {{ template "grafana.password" . 
}} {{- end }} {{- end }} {{- if not .Values.ldap.existingSecret }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/service.yaml similarity index 97% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/service.yaml index 2764566986b..ba84ef97046 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/service.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/service.yaml @@ -1,3 +1,4 @@ +{{ if .Values.service.enabled }} apiVersion: v1 kind: Service metadata: @@ -47,4 +48,4 @@ spec: {{- end }} selector: {{- include "grafana.selectorLabels" . | nindent 4 }} - +{{ end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/serviceaccount.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/serviceaccount.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/serviceaccount.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/servicemonitor.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/servicemonitor.yaml diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/statefulset.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/statefulset.yaml similarity index 88% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/statefulset.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/statefulset.yaml index 55c159c9f5f..802768645a4 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/statefulset.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/statefulset.yaml @@ -27,7 +27,7 @@ spec: checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} - {{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }} {{- end }} {{- with .Values.podAnnotations }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test-configmap.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test-configmap.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test-configmap.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test-configmap.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test-podsecuritypolicy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test-podsecuritypolicy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test-podsecuritypolicy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test-podsecuritypolicy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test-role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test-role.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test-role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test-role.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test-rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test-rolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test-rolebinding.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test-rolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test-serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test-serviceaccount.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test-serviceaccount.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test-serviceaccount.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/templates/tests/test.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/templates/tests/test.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/values.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/values.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/values.yaml index 9491c1a1f96..52466a9fcb3 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/grafana/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/grafana/values.yaml @@ -38,6 +38,22 @@ serviceAccount: replicas: 1 +## Create HorizontalPodAutoscaler object for deployment type +# +autoscaling: + enabled: false +# minReplicas: 1 +# maxReplicas: 10 +# metrics: +# - type: Resource +# resource: +# name: cpu +# targetAverageUtilization: 60 +# - type: Resource +# resource: +# name: 
memory +# targetAverageUtilization: 60 + ## See `kubectl explain poddisruptionbudget.spec` for more ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ podDisruptionBudget: {} @@ -69,7 +85,7 @@ livenessProbe: image: repository: rancher/mirrored-grafana-grafana - tag: 7.4.5 + tag: 7.5.8 sha: "" pullPolicy: IfNotPresent @@ -119,7 +135,7 @@ extraLabels: {} downloadDashboardsImage: repository: rancher/mirrored-curlimages-curl - tag: 7.73.0 + tag: 7.77.0 sha: "" pullPolicy: IfNotPresent @@ -144,6 +160,7 @@ podPortName: grafana ## ref: http://kubernetes.io/docs/user-guide/services/ ## service: + enabled: true type: ClusterIP port: 80 targetPort: 3000 @@ -420,10 +437,14 @@ extraSecretMounts: [] ## Additional grafana server volume mounts # Defines additional volume mounts. extraVolumeMounts: [] - # - name: extra-volume - # mountPath: /mnt/volume + # - name: extra-volume-0 + # mountPath: /mnt/volume0 # readOnly: true # existingClaim: volume-claim + # - name: extra-volume-1 + # mountPath: /mnt/volume1 + # readOnly: true + # hostPath: /usr/shared/ ## Pass the plugins you want installed as a list. ## @@ -530,7 +551,7 @@ dashboardsConfigMaps: {} ## grafana.ini: paths: - data: /var/lib/grafana/data + data: /var/lib/grafana/ logs: /var/log/grafana plugins: /var/lib/grafana/plugins provisioning: /etc/grafana/provisioning @@ -601,7 +622,7 @@ smtp: sidecar: image: repository: rancher/mirrored-kiwigrid-k8s-sidecar - tag: 1.10.7 + tag: 1.12.2 sha: "" imagePullPolicy: IfNotPresent resources: {} @@ -629,6 +650,8 @@ sidecar: # Otherwise the namespace in which the sidecar is running will be used. # It's also possible to specify ALL to search in all namespaces searchNamespace: null + # search in configmap, secret or both + resource: both # If specified, the sidecar will look for annotation with this name to create folder and put graph here. # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. 
folderAnnotation: null @@ -658,10 +681,8 @@ sidecar: # Otherwise the namespace in which the sidecar is running will be used. # It's also possible to specify ALL to search in all namespaces searchNamespace: null - - ## The name of a secret in the same kubernetes namespace which contain values to be added to the environment - ## This can be useful for database passwords, etc. Value is templated. - envFromSecret: "" + # search in configmap, secret or both + resource: both notifiers: enabled: false # label that the configmaps with notifiers are marked with @@ -670,6 +691,8 @@ sidecar: # Otherwise the namespace in which the sidecar is running will be used. # It's also possible to specify ALL to search in all namespaces searchNamespace: null + # search in configmap, secret or both + resource: both ## Override the deployment namespace ## @@ -688,7 +711,7 @@ imageRenderer: # image-renderer Image repository repository: rancher/mirrored-grafana-grafana-image-renderer # image-renderer Image tag - tag: 2.0.1 + tag: 3.0.1 # image-renderer Image sha (optional) sha: "" # image-renderer ImagePullPolicy @@ -707,6 +730,8 @@ imageRenderer: # image-renderer deployment priority class priorityClassName: '' service: + # Enable the image-renderer service + enabled: true # image-renderer service port name portName: 'http' # image-renderer service port used by both service and deployment diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/Chart.yaml new file 
mode 100644 index 00000000000..5d1f3a47836 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. +name: hardenedKubelet +type: application +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/README.md similarity index 81% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/README.md index dcecc69daef..0530c56aa22 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/README.md +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/README.md @@ -24,11 +24,13 @@ The following tables list the configurable parameters of the rancher-pushprox ch | ----- | ----------- | ------ | | `component` | The component that is being monitored | `kube-etcd` | `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. 
`http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` #### Optional | Parameter | Description | Default | | ----- | ----------- | ------ | | `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | | `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | | `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | | `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | @@ -40,6 +42,10 @@ The following tables list the configurable parameters of the rancher-pushprox ch | `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | | `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | | `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pod the hostNetwork Deployment has. Required and only used if `client.deployment.enable` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `client.deployment.enable` is set | `{}` | | `clients.resources` | Set resource limits and requests for the client container | `{}` | | `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | | `clients.tolerations` | Specify tolerations for clients | `[]` | diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/_helpers.tpl similarity index 80% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/_helpers.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/_helpers.tpl index f77b8edf4f1..458ad21cdd5 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/_helpers.tpl +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/_helpers.tpl @@ -49,7 +49,7 @@ provider: kubernetes {{- if .Values.clients.proxyUrl 
-}} {{ printf "%s" .Values.clients.proxyUrl }} {{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) (int .Values.proxy.port) }} {{- end -}}{{- end -}} # Client @@ -84,4 +84,21 @@ k8s-app: {{ template "pushProxy.proxy.name" . }} app: {{ template "pushprox.serviceMonitor.name" . }} release: {{ .Release.Name | quote }} {{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . "params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} {{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-clients-rbac.yaml similarity index 88% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-clients-rbac.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-clients-rbac.yaml index 95346dee645..f1a8e7232bb 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-clients-rbac.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-clients-rbac.yaml @@ -13,6 +13,9 @@ rules: {{- if and .Values.clients.https.enabled 
.Values.clients.https.useServiceAccountCredentials }} - nonResourceURLs: ["/metrics"] verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} {{- end }} --- apiVersion: rbac.authorization.k8s.io/v1 @@ -27,20 +30,20 @@ roleRef: subjects: - kind: ServiceAccount name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} --- apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . | nindent 4 }} --- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . | nindent 4 }} spec: privileged: false diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-clients.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-clients.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-clients.yaml index ed78792e5d9..3775d17b8fc 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-clients.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-clients.yaml @@ -1,18 +1,28 @@ {{- if .Values.clients }}{{- if .Values.clients.enabled }} apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} kind: DaemonSet 
+{{- end }} metadata: name: {{ template "pushProxy.client.name" . }} namespace: {{ template "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . | nindent 4 }} pushprox-exporter: "client" spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} selector: matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} template: metadata: labels: {{ include "pushProxy.client.labels" . | nindent 8 }} spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} {{- if .Values.clients.nodeSelector }} {{ toYaml .Values.clients.nodeSelector | indent 8 }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-proxy-rbac.yaml similarity index 90% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-proxy-rbac.yaml index a3509c16013..147eb437438 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-proxy-rbac.yaml @@ -23,20 +23,20 @@ roleRef: subjects: - kind: ServiceAccount name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} --- apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} --- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} spec: privileged: false diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-servicemonitor.yaml similarity index 85% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-servicemonitor.yaml index 2f3d7e54c94..7f961d6f493 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/templates/pushprox-servicemonitor.yaml @@ -6,13 +6,7 @@ metadata: namespace: {{ template "pushprox.namespace" . }} labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . 
| nindent 4 }} jobLabel: component podTargetLabels: - component diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/values.yaml similarity index 66% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/values.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/values.yaml index e1bcf79a5b7..6ad1eab4def 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedKubelet/values.yaml @@ -16,6 +16,8 @@ global: cattle: systemDefaultRegistry: "" +namespaceOverride: "" + # The component that is being monitored (i.e. etcd) component: "component" @@ -23,8 +25,13 @@ component: "component" metricsPort: 2739 # Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: +serviceMonitor: enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics clients: enabled: true @@ -52,22 +59,40 @@ clients: keyFile: "" caCertFile: "" + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + # Resource limits resources: {} # Options to select all nodes to deploy client DaemonSet on nodeSelector: {} tolerations: [] + affinity: {} image: repository: rancher/pushprox-client 
- tag: v0.1.0-rancher1-client + tag: v0.1.0-rancher2-client command: ["pushprox-client"] copyCertsImage: repository: rancher/mirrored-library-busybox tag: 1.31.1 + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. + deployment: + enabled: false + replicas: 0 + proxy: enabled: true # The port through which PushProx clients will communicate to the proxy @@ -82,5 +107,5 @@ proxy: image: repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy + tag: v0.1.0-rancher2-proxy command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/Chart.yaml new file mode 100644 index 00000000000..4d6899cffef --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. +name: hardenedNodeExporter +type: application +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/README.md similarity index 81% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/README.md index dcecc69daef..0530c56aa22 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/README.md +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/README.md @@ -24,11 +24,13 @@ The following tables list the configurable parameters of the rancher-pushprox ch | ----- | ----------- | ------ | | `component` | The component that is being monitored | `kube-etcd` | `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` #### Optional | Parameter | Description | Default | | ----- | ----------- | ------ | | `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. 
Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | | `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | | `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | | `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | @@ -40,6 +42,10 @@ The following tables list the configurable parameters of the rancher-pushprox ch | `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | | `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | | `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pod the hostNetwork Deployment has. Required and only used if `client.deployment.enable` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `client.deployment.enable` is set | `{}` | | `clients.resources` | Set resource limits and requests for the client container | `{}` | | `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | | `clients.tolerations` | Specify tolerations for clients | `[]` | diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/_helpers.tpl similarity index 80% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/_helpers.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/_helpers.tpl index f77b8edf4f1..458ad21cdd5 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/_helpers.tpl +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/_helpers.tpl @@ -49,7 +49,7 @@ provider: kubernetes {{- if .Values.clients.proxyUrl -}} {{ printf "%s" .Values.clients.proxyUrl }} {{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
(include "pushprox.namespace" .) (int .Values.proxy.port) }} {{- end -}}{{- end -}} # Client @@ -84,4 +84,21 @@ k8s-app: {{ template "pushProxy.proxy.name" . }} app: {{ template "pushprox.serviceMonitor.name" . }} release: {{ .Release.Name | quote }} {{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . "params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} {{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-clients-rbac.yaml similarity index 88% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-clients-rbac.yaml index 95346dee645..f1a8e7232bb 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-clients-rbac.yaml @@ -13,6 +13,9 @@ rules: {{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} - nonResourceURLs: ["/metrics"] verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} {{- end }} --- apiVersion: 
rbac.authorization.k8s.io/v1 @@ -27,20 +30,20 @@ roleRef: subjects: - kind: ServiceAccount name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} --- apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . | nindent 4 }} --- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . | nindent 4 }} spec: privileged: false diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-clients.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-clients.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-clients.yaml index ed78792e5d9..3775d17b8fc 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-clients.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-clients.yaml @@ -1,18 +1,28 @@ {{- if .Values.clients }}{{- if .Values.clients.enabled }} apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} kind: DaemonSet +{{- end }} metadata: name: {{ template "pushProxy.client.name" . }} namespace: {{ template "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} pushprox-exporter: "client" spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} selector: matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} template: metadata: labels: {{ include "pushProxy.client.labels" . | nindent 8 }} spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} {{- if .Values.clients.nodeSelector }} {{ toYaml .Values.clients.nodeSelector | indent 8 }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-proxy-rbac.yaml similarity index 90% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-proxy-rbac.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-proxy-rbac.yaml index a3509c16013..147eb437438 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-proxy-rbac.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-proxy-rbac.yaml @@ -23,20 +23,20 @@ roleRef: subjects: - kind: ServiceAccount name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} --- apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} --- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: {{ template "pushProxy.proxy.name" . 
}} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} spec: privileged: false diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-servicemonitor.yaml similarity index 85% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-servicemonitor.yaml index 2f3d7e54c94..7f961d6f493 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/templates/pushprox-servicemonitor.yaml @@ -6,13 +6,7 @@ metadata: namespace: {{ template "pushprox.namespace" . }} labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . 
| nindent 4 }} jobLabel: component podTargetLabels: - component diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/values.yaml similarity index 66% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/values.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/values.yaml index e1bcf79a5b7..6ad1eab4def 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/hardenedNodeExporter/values.yaml @@ -16,6 +16,8 @@ global: cattle: systemDefaultRegistry: "" +namespaceOverride: "" + # The component that is being monitored (i.e. etcd) component: "component" @@ -23,8 +25,13 @@ component: "component" metricsPort: 2739 # Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: +serviceMonitor: enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics clients: enabled: true @@ -52,22 +59,40 @@ clients: keyFile: "" caCertFile: "" + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + # Resource limits resources: {} # Options to select all nodes to deploy client DaemonSet on nodeSelector: {} tolerations: [] + affinity: {} image: repository: rancher/pushprox-client - tag: 
v0.1.0-rancher1-client + tag: v0.1.0-rancher2-client command: ["pushprox-client"] copyCertsImage: repository: rancher/mirrored-library-busybox tag: 1.31.1 + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. + deployment: + enabled: false + replicas: 0 + proxy: enabled: true # The port through which PushProx clients will communicate to the proxy @@ -82,5 +107,5 @@ proxy: image: repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy + tag: v0.1.0-rancher2-proxy command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/Chart.yaml index 
56ff36fc736..9cff4227ca5 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. name: k3sServer type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/README.md similarity index 81% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/README.md index dcecc69daef..0530c56aa22 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/README.md +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/README.md @@ -24,11 +24,13 @@ The following tables list the configurable parameters of the rancher-pushprox ch | ----- | ----------- | ------ | | `component` | The component that is being monitored | `kube-etcd` | `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` #### Optional | Parameter | Description | Default | | ----- | ----------- | ------ | | `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. 
Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | | `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | | `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | | `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | @@ -40,6 +42,10 @@ The following tables list the configurable parameters of the rancher-pushprox ch | `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | | `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | | `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pod the hostNetwork Deployment has. Required and only used if `client.deployment.enable` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `client.deployment.enable` is set | `{}` | | `clients.resources` | Set resource limits and requests for the client container | `{}` | | `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | | `clients.tolerations` | Specify tolerations for clients | `[]` | diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/_helpers.tpl similarity index 80% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/_helpers.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/_helpers.tpl index f77b8edf4f1..458ad21cdd5 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/_helpers.tpl +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/_helpers.tpl @@ -49,7 +49,7 @@ provider: kubernetes {{- if .Values.clients.proxyUrl -}} {{ printf "%s" .Values.clients.proxyUrl }} {{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} {{- end -}}{{- end -}} # Client @@ -84,4 +84,21 @@ k8s-app: {{ template "pushProxy.proxy.name" . }} app: {{ template "pushprox.serviceMonitor.name" . }} release: {{ .Release.Name | quote }} {{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . "params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} {{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-clients-rbac.yaml similarity index 88% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-clients-rbac.yaml index 95346dee645..f1a8e7232bb 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-clients-rbac.yaml @@ -13,6 +13,9 @@ rules: {{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} - nonResourceURLs: ["/metrics"] verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} {{- end }} --- apiVersion: rbac.authorization.k8s.io/v1 @@ -27,20 +30,20 @@ roleRef: 
subjects: - kind: ServiceAccount name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} --- apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . | nindent 4 }} --- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . | nindent 4 }} spec: privileged: false diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-clients.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-clients.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-clients.yaml index ed78792e5d9..3775d17b8fc 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-clients.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-clients.yaml @@ -1,18 +1,28 @@ {{- if .Values.clients }}{{- if .Values.clients.enabled }} apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} kind: DaemonSet +{{- end }} metadata: name: {{ template "pushProxy.client.name" . }} namespace: {{ template "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} pushprox-exporter: "client" spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} selector: matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} template: metadata: labels: {{ include "pushProxy.client.labels" . | nindent 8 }} spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} {{- if .Values.clients.nodeSelector }} {{ toYaml .Values.clients.nodeSelector | indent 8 }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-proxy-rbac.yaml similarity index 90% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-proxy-rbac.yaml index a3509c16013..147eb437438 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-proxy-rbac.yaml @@ -23,20 +23,20 @@ roleRef: subjects: - kind: ServiceAccount name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} --- apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} --- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: {{ template "pushProxy.proxy.name" . 
}} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} spec: privileged: false diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-servicemonitor.yaml similarity index 85% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-servicemonitor.yaml index 2f3d7e54c94..7f961d6f493 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/templates/pushprox-servicemonitor.yaml @@ -6,13 +6,7 @@ metadata: namespace: {{ template "pushprox.namespace" . }} labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . }} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . 
| nindent 4 }} jobLabel: component podTargetLabels: - component diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/values.yaml similarity index 66% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/values.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/values.yaml index e1bcf79a5b7..6ad1eab4def 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/k3sServer/values.yaml @@ -16,6 +16,8 @@ global: cattle: systemDefaultRegistry: "" +namespaceOverride: "" + # The component that is being monitored (i.e. etcd) component: "component" @@ -23,8 +25,13 @@ component: "component" metricsPort: 2739 # Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: +serviceMonitor: enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics clients: enabled: true @@ -52,22 +59,40 @@ clients: keyFile: "" caCertFile: "" + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + # Resource limits resources: {} # Options to select all nodes to deploy client DaemonSet on nodeSelector: {} tolerations: [] + affinity: {} image: repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client + tag: v0.1.0-rancher2-client 
command: ["pushprox-client"] copyCertsImage: repository: rancher/mirrored-library-busybox tag: 1.31.1 + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. + deployment: + enabled: false + replicas: 0 + proxy: enabled: true # The port through which PushProx clients will communicate to the proxy @@ -82,5 +107,5 @@ proxy: image: repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy + tag: v0.1.0-rancher2-proxy command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/Chart.yaml similarity index 90% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/Chart.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/Chart.yaml index 1e90053e9dd..9e2ebba4aa2 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/Chart.yaml @@ -4,8 +4,8 @@ annotations: catalog.rancher.io/certified: rancher catalog.rancher.io/namespace: cattle-monitoring-system catalog.rancher.io/release-name: rancher-kube-state-metrics -apiVersion: v1 -appVersion: 1.9.8 +apiVersion: v2 +appVersion: 2.0.0 description: Install kube-state-metrics to generate and expose cluster-level metrics home: https://github.com/kubernetes/kube-state-metrics/ keywords: @@ -21,4 +21,5 @@ maintainers: name: kube-state-metrics sources: - https://github.com/kubernetes/kube-state-metrics/ -version: 2.13.1 +type: application +version: 3.2.0 diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/README.md similarity index 56% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/README.md index e93a3d25245..7c2e16918f3 100644 --- a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/README.md +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/README.md @@ -5,7 +5,7 @@ Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state ## Get Repo Info ```console -helm repo add kube-state-metrics https://kubernetes.github.io/kube-state-metrics +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts helm repo update ``` @@ -14,11 +14,7 @@ _See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation ## Install Chart ```console -# Helm 3 -$ helm 
install [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] - -# Helm 2 -$ helm install --name [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] +helm install [RELEASE_NAME] prometheus-community/kube-state-metrics [flags] ``` _See [configuration](#configuration) below._ @@ -28,11 +24,7 @@ _See [helm install](https://helm.sh/docs/helm/helm_install/) for command documen ## Uninstall Chart ```console -# Helm 3 -$ helm uninstall [RELEASE_NAME] - -# Helm 2 -# helm delete --purge [RELEASE_NAME] +helm uninstall [RELEASE_NAME] ``` This removes all the Kubernetes components associated with the chart and deletes the release. @@ -42,25 +34,35 @@ _See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command doc ## Upgrading Chart ```console -# Helm 3 or 2 -$ helm upgrade [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] +helm upgrade [RELEASE_NAME] prometheus-community/kube-state-metrics [flags] ``` _See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ -### From stable/kube-state-metrics +### Migrating from stable/kube-state-metrics and kubernetes/kube-state-metrics You can upgrade in-place: 1. [get repo info](#get-repo-info) 1. [upgrade](#upgrading-chart) your existing release name using the new chart repo + +## Upgrading to v3.0.0 + +v3.0.0 includes kube-state-metrics v2.0, see the [changelog](https://github.com/kubernetes/kube-state-metrics/blob/release-2.0/CHANGELOG.md) for major changes on the application-side. + +The upgraded chart now the following changes: +* Dropped support for helm v2 (helm v3 or later is required) +* collectors key was renamed to resources +* namespace key was renamed to namespaces + + ## Configuration See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). 
To see all configurable options with detailed comments: ```console -helm show values kube-state-metrics/kube-state-metrics +helm show values prometheus-community/kube-state-metrics ``` -You may also `helm show values` on this chart's [dependencies](#dependencies) for additional options. +You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options. diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/NOTES.txt b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/NOTES.txt similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/NOTES.txt rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/NOTES.txt diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/_helpers.tpl similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/_helpers.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/_helpers.tpl diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/clusterrolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/clusterrolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/clusterrolebinding.yaml diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/deployment.yaml 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/deployment.yaml similarity index 81% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/deployment.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/deployment.yaml index 4ab55291b10..f338308ad9e 100644 --- a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/deployment.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/deployment.yaml @@ -70,91 +70,91 @@ spec: {{- end }} {{ end }} {{ if .Values.collectors.certificatesigningrequests }} - - --collectors=certificatesigningrequests + - --resources=certificatesigningrequests {{ end }} {{ if .Values.collectors.configmaps }} - - --collectors=configmaps + - --resources=configmaps {{ end }} {{ if .Values.collectors.cronjobs }} - - --collectors=cronjobs + - --resources=cronjobs {{ end }} {{ if .Values.collectors.daemonsets }} - - --collectors=daemonsets + - --resources=daemonsets {{ end }} {{ if .Values.collectors.deployments }} - - --collectors=deployments + - --resources=deployments {{ end }} {{ if .Values.collectors.endpoints }} - - --collectors=endpoints + - --resources=endpoints {{ end }} {{ if .Values.collectors.horizontalpodautoscalers }} - - --collectors=horizontalpodautoscalers + - --resources=horizontalpodautoscalers {{ end }} {{ if .Values.collectors.ingresses }} - - --collectors=ingresses + - --resources=ingresses {{ end }} {{ if .Values.collectors.jobs }} - - --collectors=jobs + - --resources=jobs {{ end }} {{ if .Values.collectors.limitranges }} - - --collectors=limitranges + - --resources=limitranges {{ end }} {{ if .Values.collectors.mutatingwebhookconfigurations }} - - --collectors=mutatingwebhookconfigurations + - --resources=mutatingwebhookconfigurations {{ end }} {{ if .Values.collectors.namespaces }} - - --collectors=namespaces + 
- --resources=namespaces {{ end }} {{ if .Values.collectors.networkpolicies }} - - --collectors=networkpolicies + - --resources=networkpolicies {{ end }} {{ if .Values.collectors.nodes }} - - --collectors=nodes + - --resources=nodes {{ end }} {{ if .Values.collectors.persistentvolumeclaims }} - - --collectors=persistentvolumeclaims + - --resources=persistentvolumeclaims {{ end }} {{ if .Values.collectors.persistentvolumes }} - - --collectors=persistentvolumes + - --resources=persistentvolumes {{ end }} {{ if .Values.collectors.poddisruptionbudgets }} - - --collectors=poddisruptionbudgets + - --resources=poddisruptionbudgets {{ end }} {{ if .Values.collectors.pods }} - - --collectors=pods + - --resources=pods {{ end }} {{ if .Values.collectors.replicasets }} - - --collectors=replicasets + - --resources=replicasets {{ end }} {{ if .Values.collectors.replicationcontrollers }} - - --collectors=replicationcontrollers + - --resources=replicationcontrollers {{ end }} {{ if .Values.collectors.resourcequotas }} - - --collectors=resourcequotas + - --resources=resourcequotas {{ end }} {{ if .Values.collectors.secrets }} - - --collectors=secrets + - --resources=secrets {{ end }} {{ if .Values.collectors.services }} - - --collectors=services + - --resources=services {{ end }} {{ if .Values.collectors.statefulsets }} - - --collectors=statefulsets + - --resources=statefulsets {{ end }} {{ if .Values.collectors.storageclasses }} - - --collectors=storageclasses + - --resources=storageclasses {{ end }} {{ if .Values.collectors.validatingwebhookconfigurations }} - - --collectors=validatingwebhookconfigurations + - --resources=validatingwebhookconfigurations {{ end }} {{ if .Values.collectors.verticalpodautoscalers }} - - --collectors=verticalpodautoscalers + - --resources=verticalpodautoscalers {{ end }} {{ if .Values.collectors.volumeattachments }} - - --collectors=volumeattachments + - --resources=volumeattachments {{ end }} -{{ if .Values.namespace }} - - --namespace={{ 
.Values.namespace | join "," }} +{{ if .Values.namespaces }} + - --namespaces={{ tpl .Values.namespaces $ | join "," }} {{ end }} {{ if .Values.autosharding.enabled }} - --pod=$(POD_NAME) @@ -177,6 +177,9 @@ spec: image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}" ports: - containerPort: 8080 +{{- if .Values.selfMonitor.enabled }} + - containerPort: 8081 +{{- end }} livenessProbe: httpGet: path: /healthz @@ -193,6 +196,10 @@ spec: resources: {{ toYaml .Values.resources | indent 10 }} {{- end }} +{{- if .Values.containerSecurityContext }} + securityContext: +{{ toYaml .Values.containerSecurityContext | indent 10 }} +{{- end }} {{- if .Values.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.imagePullSecrets | indent 8 }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/kubeconfig-secret.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/kubeconfig-secret.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/kubeconfig-secret.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/kubeconfig-secret.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/pdb.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/pdb.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/pdb.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/pdb.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/podsecuritypolicy.yaml 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/podsecuritypolicy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/podsecuritypolicy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/podsecuritypolicy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/psp-clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/psp-clusterrole.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/psp-clusterrole.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/psp-clusterrole.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/role.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/role.yaml index 6259d2f6175..25c8bc89337 100644 --- 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/role.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/role.yaml @@ -1,11 +1,8 @@ -{{- if and (eq $.Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} -{{- if eq .Values.rbac.useClusterRole false }} -{{- range (split "," $.Values.namespace) }} -{{- end }} -{{- end -}} +{{- if and (eq .Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} +{{- range (split "," .Values.namespaces) }} --- apiVersion: rbac.authorization.k8s.io/v1 -{{- if eq .Values.rbac.useClusterRole false }} +{{- if eq $.Values.rbac.useClusterRole false }} kind: Role {{- else }} kind: ClusterRole @@ -17,7 +14,7 @@ metadata: app.kubernetes.io/managed-by: {{ $.Release.Service }} app.kubernetes.io/instance: {{ $.Release.Name }} name: {{ template "kube-state-metrics.fullname" $ }} -{{- if eq .Values.rbac.useClusterRole false }} +{{- if eq $.Values.rbac.useClusterRole false }} namespace: {{ . 
}} {{- end }} rules: @@ -190,3 +187,4 @@ rules: verbs: ["list", "watch"] {{ end -}} {{- end -}} +{{- end -}} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/rolebinding.yaml similarity index 95% rename from charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/rolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/rolebinding.yaml index 732174a3340..72a1a2e904c 100644 --- a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101+up2.13.1/templates/rolebinding.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/rolebinding.yaml @@ -1,5 +1,5 @@ {{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} -{{- range (split "," $.Values.namespace) }} +{{- range (split "," $.Values.namespaces) }} --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/serviceaccount.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/serviceaccount.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/serviceaccount.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/servicemonitor.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/servicemonitor.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/stsdiscovery-role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/stsdiscovery-role.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/stsdiscovery-role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/stsdiscovery-role.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/values.yaml similarity index 91% rename from 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/values.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/values.yaml index f64645690e6..052e534de3f 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kube-state-metrics/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kube-state-metrics/values.yaml @@ -6,7 +6,7 @@ global: prometheusScrape: true image: repository: rancher/mirrored-kube-state-metrics-kube-state-metrics - tag: v1.9.8 + tag: v2.0.0 pullPolicy: IfNotPresent imagePullSecrets: [] @@ -93,6 +93,11 @@ securityContext: runAsUser: 65534 fsGroup: 65534 +## Specify security settings for a Container +## Allows overrides and additional options compared to (Pod) securityContext +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +containerSecurityContext: {} + ## Node labels for pod assignment ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} @@ -115,7 +120,7 @@ podAnnotations: {} podDisruptionBudget: {} # Available collectors for kube-state-metrics. By default all available -# collectors are enabled. +# resources are enabled. collectors: certificatesigningrequests: true configmaps: true @@ -152,8 +157,8 @@ kubeconfig: # base64 encoded kube-config file secret: -# Namespace to be enabled for collecting resources. By default all namespaces are collected. -# namespace: "" +# Comma-separated list of namespaces to be enabled for collecting resources. By default all namespaces are collected. 
+namespaces: "" ## Override the deployment namespace ## @@ -177,7 +182,7 @@ resources: {} kubeTargetVersionOverride: "" # Enable self metrics configuration for service and Service Monitor -# Default values for telemetry configuration can be overriden +# Default values for telemetry configuration can be overridden selfMonitor: enabled: false # telemetryHost: 0.0.0.0 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/Chart.yaml index a82ef1d32b7..cf16381b155 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: kubeAdmControllerManager type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/README.md similarity index 81% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/README.md index dcecc69daef..0530c56aa22 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/README.md +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/README.md @@ -24,11 +24,13 @@ The following tables list the configurable parameters of the rancher-pushprox ch | ----- | ----------- | ------ | | `component` | The component that is being monitored | `kube-etcd` | `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` #### Optional | Parameter | Description | Default | | ----- | ----------- | ------ | | `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. 
Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | | `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | | `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | | `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | @@ -40,6 +42,10 @@ The following tables list the configurable parameters of the rancher-pushprox ch | `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | | `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | | `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pod the hostNetwork Deployment has. Required and only used if `client.deployment.enable` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `client.deployment.enable` is set | `{}` | | `clients.resources` | Set resource limits and requests for the client container | `{}` | | `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | | `clients.tolerations` | Specify tolerations for clients | `[]` | diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/_helpers.tpl similarity index 80% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/_helpers.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/_helpers.tpl index f77b8edf4f1..458ad21cdd5 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/_helpers.tpl +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/_helpers.tpl @@ -49,7 +49,7 @@ provider: kubernetes {{- if .Values.clients.proxyUrl -}} {{ printf "%s" .Values.clients.proxyUrl }} {{- else -}} -{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) (int .Values.proxy.port) }} {{- end -}}{{- end -}} # Client @@ -84,4 +84,21 @@ k8s-app: {{ template "pushProxy.proxy.name" . }} app: {{ template "pushprox.serviceMonitor.name" . }} release: {{ .Release.Name | quote }} {{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . "params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} {{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml similarity index 88% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml index 95346dee645..f1a8e7232bb 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml @@ -13,6 +13,9 @@ rules: {{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} - nonResourceURLs: 
["/metrics"] verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} {{- end }} --- apiVersion: rbac.authorization.k8s.io/v1 @@ -27,20 +30,20 @@ roleRef: subjects: - kind: ServiceAccount name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} --- apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . | nindent 4 }} --- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: {{ template "pushProxy.client.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . | nindent 4 }} spec: privileged: false diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-clients.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-clients.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-clients.yaml index ed78792e5d9..3775d17b8fc 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-clients.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-clients.yaml @@ -1,18 +1,28 @@ {{- if .Values.clients }}{{- if .Values.clients.enabled }} apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} kind: DaemonSet +{{- end }} metadata: name: {{ template "pushProxy.client.name" . 
}} namespace: {{ template "pushprox.namespace" . }} labels: {{ include "pushProxy.client.labels" . | nindent 4 }} pushprox-exporter: "client" spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} selector: matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} template: metadata: labels: {{ include "pushProxy.client.labels" . | nindent 8 }} spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} {{- if .Values.clients.nodeSelector }} {{ toYaml .Values.clients.nodeSelector | indent 8 }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml similarity index 90% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml index a3509c16013..147eb437438 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml @@ -23,20 +23,20 @@ roleRef: subjects: - kind: ServiceAccount name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} --- apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} --- apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: name: {{ template "pushProxy.proxy.name" . }} - namespace: {{ .Release.Namespace }} + namespace: {{ include "pushprox.namespace" . }} labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} spec: privileged: false diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-servicemonitor.yaml similarity index 85% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-servicemonitor.yaml index 2f3d7e54c94..7f961d6f493 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/k3sServer/templates/pushprox-servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/templates/pushprox-servicemonitor.yaml @@ -6,13 +6,7 @@ metadata: namespace: {{ template "pushprox.namespace" . }} labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} spec: - endpoints: - - port: metrics - proxyUrl: {{ template "pushProxy.proxyUrl" . 
}} - {{- if .Values.clients.https.enabled }} - params: - _scheme: [https] - {{- end }} + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} jobLabel: component podTargetLabels: - component diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/values.yaml similarity index 66% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/values.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/values.yaml index e1bcf79a5b7..6ad1eab4def 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmControllerManager/values.yaml @@ -16,6 +16,8 @@ global: cattle: systemDefaultRegistry: "" +namespaceOverride: "" + # The component that is being monitored (i.e. 
etcd) component: "component" @@ -23,8 +25,13 @@ component: "component" metricsPort: 2739 # Configure ServiceMonitor that monitors metrics from the metricsPort endpoint -serviceMonitor: +serviceMonitor: enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics clients: enabled: true @@ -52,22 +59,40 @@ clients: keyFile: "" caCertFile: "" + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + # Resource limits resources: {} # Options to select all nodes to deploy client DaemonSet on nodeSelector: {} tolerations: [] + affinity: {} image: repository: rancher/pushprox-client - tag: v0.1.0-rancher1-client + tag: v0.1.0-rancher2-client command: ["pushprox-client"] copyCertsImage: repository: rancher/mirrored-library-busybox tag: 1.31.1 + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + proxy: enabled: true # The port through which PushProx clients will communicate to the proxy @@ -82,5 +107,5 @@ proxy: image: repository: rancher/pushprox-proxy - tag: v0.1.0-rancher1-proxy + tag: v0.1.0-rancher2-proxy command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/Chart.yaml index bfb047ae693..c221ee5d3b8 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmEtcd/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: kubeAdmEtcd type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.<cert|key|caCert>File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml @@ 
-0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmEtcd/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the 
client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/Chart.yaml index ffe9ae70cbd..174b9ede276 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmProxy/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: kubeAdmProxy type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.<cert|key|caCert>File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmProxy/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it empty + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the 
client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/Chart.yaml index 794197de139..02be8ce6269 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/kubeAdmScheduler/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: kubeAdmScheduler type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. 
+ +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. + +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.certFile`, `clients.https.keyFile`, and `clients.https.caCertFile` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/kubeAdmScheduler/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the 
client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/Chart.yaml similarity index 95% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/Chart.yaml index 194f0877b9b..1e35ab0cd51 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/Chart.yaml @@ -5,7 +5,7 @@ annotations: catalog.rancher.io/namespace: cattle-monitoring-system catalog.rancher.io/release-name: rancher-prometheus-adapter apiVersion: v1 -appVersion: v0.8.3 +appVersion: v0.8.4 description: A Helm chart for k8s prometheus adapter home: https://github.com/DirectXMan12/k8s-prometheus-adapter keywords: @@ -23,4 +23,4 @@ name: prometheus-adapter sources: - https://github.com/kubernetes/charts - 
https://github.com/DirectXMan12/k8s-prometheus-adapter -version: 2.12.1 +version: 2.14.0 diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/README.md similarity index 98% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/README.md index 1fe1fad661d..b6028b01a2c 100644 --- a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/README.md +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/README.md @@ -118,7 +118,7 @@ Enabling this option will cause resource metrics to be served at `/apis/metrics. rules: resource: cpu: - containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[3m])) by (<<.GroupBy>>) + containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, container!=""}[3m])) by (<<.GroupBy>>) nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[3m])) by (<<.GroupBy>>) resources: overrides: @@ -130,7 +130,7 @@ rules: resource: pod containerLabel: container memory: - containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>) + containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>, container!=""}) by (<<.GroupBy>>) nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>) resources: overrides: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/NOTES.txt b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/NOTES.txt similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/NOTES.txt rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/NOTES.txt diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/_helpers.tpl similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/_helpers.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/_helpers.tpl diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/certmanager.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/certmanager.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/certmanager.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/certmanager.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml similarity index 100% rename 
from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/cluster-role-resource-reader.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/cluster-role-resource-reader.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/cluster-role-resource-reader.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/cluster-role-resource-reader.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/configmap.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/configmap.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/configmap.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/configmap.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/custom-metrics-apiservice.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/custom-metrics-apiservice.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/custom-metrics-apiservice.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/custom-metrics-apiservice.yaml diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/custom-metrics-cluster-role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/custom-metrics-cluster-role.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/custom-metrics-cluster-role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/custom-metrics-cluster-role.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/deployment.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/deployment.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/deployment.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/deployment.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/external-metrics-apiservice.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/external-metrics-apiservice.yaml similarity index 100% rename from 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/external-metrics-apiservice.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/external-metrics-apiservice.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/external-metrics-cluster-role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/external-metrics-cluster-role.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/external-metrics-cluster-role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/external-metrics-cluster-role.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/pdb.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/pdb.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/pdb.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/pdb.yaml diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/psp.yaml similarity index 95% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/psp.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/psp.yaml index a88c9c2f2a0..c5ae1060747 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/psp.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/psp.yaml @@ -12,6 +12,9 @@ metadata: spec: {{- if .Values.hostNetwork.enabled }} hostNetwork: true + hostPorts: + - min: {{ .Values.listenPort }} + max: {{ .Values.listenPort }} {{- end }} fsGroup: rule: RunAsAny diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/resource-metrics-apiservice.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/resource-metrics-apiservice.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/resource-metrics-apiservice.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/resource-metrics-apiservice.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/resource-metrics-cluster-role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/resource-metrics-cluster-role.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/resource-metrics-cluster-role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/resource-metrics-cluster-role.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/role-binding-auth-reader.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/role-binding-auth-reader.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/role-binding-auth-reader.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/role-binding-auth-reader.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/secret.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/secret.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/secret.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/secret.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/service.yaml similarity index 100% rename from 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/serviceaccount.yaml similarity index 75% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/serviceaccount.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/serviceaccount.yaml index 42ef0267ebc..c3050f0528c 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/templates/serviceaccount.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/templates/serviceaccount.yaml @@ -9,4 +9,8 @@ metadata: heritage: {{ .Release.Service }} name: {{ template "k8s-prometheus-adapter.serviceAccountName" . 
}} namespace: {{ .Release.Namespace }} +{{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} +{{- end }} {{- end -}} diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/values.yaml similarity index 93% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/values.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/values.yaml index d9108cb9ae3..3da3cf5bfbd 100644 --- a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-adapter/values.yaml @@ -7,7 +7,7 @@ affinity: {} image: repository: rancher/mirrored-directxman12-k8s-prometheus-adapter - tag: v0.8.3 + tag: v0.8.4 pullPolicy: IfNotPresent logLevel: 4 @@ -43,6 +43,11 @@ serviceAccount: # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: + # ServiceAccount annotations. 
+ # Use case: AWS EKS IAM roles for service accounts + # ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html + annotations: {} + # Custom DNS configuration to be added to prometheus-adapter pods dnsConfig: {} # nameservers: @@ -85,7 +90,7 @@ rules: # metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) resource: {} # cpu: -# containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[3m])) by (<<.GroupBy>>) +# containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, container!=""}[3m])) by (<<.GroupBy>>) # nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[3m])) by (<<.GroupBy>>) # resources: # overrides: @@ -97,7 +102,7 @@ rules: # resource: pod # containerLabel: container # memory: -# containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>) +# containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>, container!=""}) by (<<.GroupBy>>) # nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>) # resources: # overrides: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/Chart.yaml similarity index 97% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/Chart.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/Chart.yaml index 887a6d5da96..5bdb4114712 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/Chart.yaml @@ -20,4 +20,4 @@ maintainers: name: prometheus-node-exporter sources: - https://github.com/prometheus/node_exporter/ -version: 1.16.2 +version: 1.18.1 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/README.md similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/README.md diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/NOTES.txt b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/NOTES.txt similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/NOTES.txt rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/NOTES.txt diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/_helpers.tpl similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/_helpers.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/_helpers.tpl diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/daemonset.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/daemonset.yaml similarity index 97% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/daemonset.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/daemonset.yaml index a3a1bc8853a..bd64e6948be 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/daemonset.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/daemonset.yaml @@ -29,6 +29,10 @@ spec: {{- if .Values.priorityClassName }} priorityClassName: {{ .Values.priorityClassName }} {{- end }} + {{- if .Values.extraInitContainers }} + initContainers: + {{ toYaml .Values.extraInitContainers | nindent 6 }} + {{- end }} containers: - name: node-exporter image: "{{ template "system_default_registry" . 
}}{{ .Values.image.repository }}:{{ .Values.image.tag }}" diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/endpoints.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/endpoints.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/endpoints.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/endpoints.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/monitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/monitor.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/monitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/monitor.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml similarity index 100% rename from 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/psp.yaml similarity index 92% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/psp.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/psp.yaml index f00506c9800..ec1259e01e7 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/psp.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/psp.yaml @@ -6,6 +6,10 @@ metadata: name: {{ template "prometheus-node-exporter.fullname" . }} namespace: {{ template "prometheus-node-exporter.namespace" . }} labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} +{{- if .Values.rbac.pspAnnotations }} + annotations: +{{ toYaml .Values.rbac.pspAnnotations | indent 4 }} +{{- end}} spec: privileged: false # Required to prevent escalations to root. 
diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/serviceaccount.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/templates/serviceaccount.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/templates/serviceaccount.yaml diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/values.yaml similarity index 97% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/values.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/values.yaml index 47dedd4d2df..5a0e6ca352a 100644 --- a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/prometheus-node-exporter/values.yaml @@ -77,6 +77,7 @@ rbac: ## If true, create & use Pod Security Policy resources ## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ pspEnabled: true + pspAnnotations: {} # for deployments that have node_exporter deployed outside of the cluster, list # their addresses here @@ -175,3 +176,7 @@ 
sidecarVolumeMount: [] ## - name: collector-textfiles ## mountPath: /run/prometheus ## readOnly: false + +## Additional InitContainers to initialize the pod +## +extraInitContainers: [] diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/Chart.yaml index e5205567edf..fc6be394165 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2ControllerManager/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: rke2ControllerManager type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. 
+ +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. + +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pod the hostNetwork Deployment has. Required and only used if `client.deployment.enable` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `client.deployment.enable` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2ControllerManager/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the 
client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/Chart.yaml index 7320aec04bc..11a2ee23a4a 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Etcd/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: rke2Etcd type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://<HOST_IP>:<metricsPort>/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.<cert|key|caCert>File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . 
| nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if 
.Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Etcd/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the 
client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/Chart.yaml new file mode 100644 index 00000000000..e506f7552c4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: rke2IngressNginx +type: application +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2IngressNginx/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the 
client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/Chart.yaml index 1e220116962..2f9a2c86e2d 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Proxy/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: rke2Proxy type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.certFile`, `clients.https.keyFile`, and `clients.https.caCertFile` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . 
| nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if 
.Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Proxy/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the 
client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/Chart.yaml index 4b076fb84d3..a20d2f6fd28 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rke2Scheduler/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: rke2Scheduler type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pod the hostNetwork Deployment has. Required and only used if `client.deployment.enable` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `client.deployment.enable` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml @@ -0,0 
+1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rke2Scheduler/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the 
client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/Chart.yaml index 09ef21031e8..76395f89088 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeControllerManager/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: rkeControllerManager type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. 
+ +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. + +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.certFile`, `clients.https.keyFile`, and `clients.https.caCertFile` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-proxy.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/templates/pushprox-proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-proxy.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeControllerManager/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the 
client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/.helmignore similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/Chart.yaml index a4f4b02e473..a1360886daa 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeEtcd/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: rkeEtcd type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.certFile`, `clients.https.keyFile`, and `clients.https.caCertFile` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . 
| nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-proxy.yaml new file mode 100644 index 00000000000..571e1313851 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . }} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . 
}} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeEtcd/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. 
client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? 
+ enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/.helmignore similarity index 100% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/.helmignore rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/.helmignore diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/Chart.yaml new file mode 100644 index 00000000000..835ee8a8328 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: rkeIngressNginx +type: application +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has; it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.<cert|key|caCert>File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-proxy.yaml new file mode 100644 index 00000000000..571e1313851 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . }} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . 
}} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeIngressNginx/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. 
client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? 
+ enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/.helmignore new file mode 100644 index 00000000000..0e8a0eb36f4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/Chart.yaml index f86115b6859..3a5213344e5 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeProxy/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: rkeProxy type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pod the hostNetwork Deployment has. Required and only used if `client.deployment.enable` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `client.deployment.enable` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . 
| nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-proxy.yaml new file mode 100644 index 00000000000..571e1313851 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . }} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . 
}} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeProxy/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. 
client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? 
+ enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/.helmignore new file mode 100644 index 00000000000..0e8a0eb36f4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/Chart.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/Chart.yaml index 9b58f56a522..56021a2d97d 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/rkeScheduler/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/Chart.yaml @@ -10,4 +10,4 @@ description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushP clients. 
name: rkeScheduler type: application -version: 0.1.3 +version: 0.1.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . 
Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. 
Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pod the hostNetwork Deployment has. Required and only used if `client.deployment.enable` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. Required and only used if `client.deployment.enable` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-proxy.yaml new file mode 100644 index 00000000000..571e1313851 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . }} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . 
}} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/rkeScheduler/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. 
client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namespace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? 
+ enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/.helmignore b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/.helmignore new file mode 100644 index 00000000000..0e8a0eb36f4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/Chart.yaml similarity index 93% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/Chart.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/Chart.yaml index fba9162f21e..f1cc32344e2 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/Chart.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/Chart.yaml @@ -5,11 +5,11 @@ annotations: catalog.rancher.io/namespace: cattle-monitoring-system catalog.rancher.io/release-name: rancher-windows-exporter apiVersion: v1 
-appVersion: 0.0.4 +appVersion: 0.0.2 description: Sets up monitoring metrics from Windows nodes via Prometheus windows-exporter maintainers: - email: arvind.iyengar@rancher.com name: aiyengar2 name: windowsExporter type: application -version: 0.1.0 +version: 0.1.1 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/README.md b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/README.md similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/README.md rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/README.md diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/scripts/check-wins-version.ps1 b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/scripts/check-wins-version.ps1 similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/scripts/check-wins-version.ps1 rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/scripts/check-wins-version.ps1 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/scripts/proxy-entry.ps1 b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/scripts/proxy-entry.ps1 similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/scripts/proxy-entry.ps1 rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/scripts/proxy-entry.ps1 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/scripts/run.ps1 b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/scripts/run.ps1 similarity index 100% rename from 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/scripts/run.ps1 rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/scripts/run.ps1 diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/_helpers.tpl new file mode 100644 index 00000000000..16975d9d05d --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/_helpers.tpl @@ -0,0 +1,113 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# General + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +The components in this chart create additional resources that expand the longest created name strings. +The longest name that gets created adds an extra 37 characters, so truncation should be 63-37=26. +*/}} +{{- define "windowsExporter.name" -}} +{{ printf "%s-windows-exporter" .Release.Name }} +{{- end -}} + +{{- define "windowsExporter.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride -}} +{{- end -}} + +{{- define "windowsExporter.labels" -}} +k8s-app: {{ template "windowsExporter.name" . 
}} +release: {{ .Release.Name }} +component: "windows-exporter" +provider: kubernetes +{{- end -}} + +# Client + +{{- define "windowsExporter.client.nodeSelector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: windows +{{- else -}} +kubernetes.io/os: windows +{{- end -}} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector }} +{{- end }} +{{- end -}} + +{{- define "windowsExporter.client.tolerations" -}} +{{- if .Values.clients.tolerations -}} +{{ toYaml .Values.clients.tolerations }} +{{- else -}} +- operator: Exists +{{- end -}} +{{- end -}} + +{{- define "windowsExporter.client.env" -}} +- name: LISTEN_PORT + value: {{ required "Need .Values.clients.port to figure out where to get metrics from" .Values.clients.port | quote }} +{{- if .Values.clients.enabledCollectors }} +- name: ENABLED_COLLECTORS + value: {{ .Values.clients.enabledCollectors | quote }} +{{- end }} +{{- if .Values.clients.env }} +{{ toYaml .Values.clients.env }} +{{- end }} +{{- end -}} + +{{- define "windowsExporter.validatePathPrefix" -}} +{{- if .Values.global.cattle.rkeWindowsPathPrefix -}} +{{- $prefixPath := (.Values.global.cattle.rkeWindowsPathPrefix | replace "/" "\\") -}} +{{- if (not (hasSuffix "\\" $prefixPath)) -}} +{{- fail (printf ".Values.global.cattle.rkeWindowsPathPrefix must end in '/' or '\\', found %s" $prefixPath) -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "windowsExporter.renamedMetrics" -}} +{{- $renamed := dict -}} +{{/* v0.15.0 */}} +{{- $_ := set $renamed "windows_mssql_transactions_active_total" "windows_mssql_transactions_active" -}} +{{/* v0.16.0 */}} +{{- $_ := set $renamed "windows_adfs_ad_login_connection_failures" "windows_adfs_ad_login_connection_failures_total" -}} +{{- $_ := set $renamed "windows_adfs_certificate_authentications" "windows_adfs_certificate_authentications_total" -}} +{{- $_ := set $renamed "windows_adfs_device_authentications" 
"windows_adfs_device_authentications_total" -}} +{{- $_ := set $renamed "windows_adfs_extranet_account_lockouts" "windows_adfs_extranet_account_lockouts_total" -}} +{{- $_ := set $renamed "windows_adfs_federated_authentications" "windows_adfs_federated_authentications_total" -}} +{{- $_ := set $renamed "windows_adfs_passport_authentications" "windows_adfs_passport_authentications_total" -}} +{{- $_ := set $renamed "windows_adfs_password_change_failed" "windows_adfs_password_change_failed_total" -}} +{{- $_ := set $renamed "windows_adfs_password_change_succeeded" "windows_adfs_password_change_succeeded_total" -}} +{{- $_ := set $renamed "windows_adfs_token_requests" "windows_adfs_token_requests_total" -}} +{{- $_ := set $renamed "windows_adfs_windows_integrated_authentications" "windows_adfs_windows_integrated_authentications_total" -}} +{{- $_ := set $renamed "windows_net_packets_outbound_errors" "windows_net_packets_outbound_errors_total" -}} +{{- $_ := set $renamed "windows_net_packets_received_discarded" "windows_net_packets_received_discarded_total" -}} +{{- $_ := set $renamed "windows_net_packets_received_errors" "windows_net_packets_received_errors_total" -}} +{{- $_ := set $renamed "windows_net_packets_received_total" "windows_net_packets_received_total_total" -}} +{{- $_ := set $renamed "windows_net_packets_received_unknown" "windows_net_packets_received_unknown_total" -}} +{{- $_ := set $renamed "windows_dns_memory_used_bytes_total" "windows_dns_memory_used_bytes" -}} +{{- $renamed | toJson -}} +{{- end -}} + +{{- define "windowsExporter.renamedMetricsRelabeling" -}} +{{- range $original, $new := (include "windowsExporter.renamedMetrics" . | fromJson) -}} +- sourceLabels: [__name__] + regex: {{ $original }} + replacement: '{{ $new }}' + targetLabel: __name__ +{{ end -}} +{{- end -}} + +{{- define "windowsExporter.renamedMetricsRules" -}} +{{- range $original, $new := (include "windowsExporter.renamedMetrics" . 
| fromJson) -}} +- record: {{ $original }} + expr: {{ $new }} +{{ end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/configmap.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/configmap.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/configmap.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/configmap.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/daemonset.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/daemonset.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/daemonset.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/daemonset.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/prometheusrule.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/prometheusrule.yaml new file mode 100644 index 00000000000..f31983122a6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/prometheusrule.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.prometheusRule .Values.clients }}{{- if and .Values.prometheusRule.enabled .Values.clients.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + labels: {{ include "windowsExporter.labels" . | nindent 4 }} + name: {{ template "windowsExporter.name" . }} + namespace: {{ template "windowsExporter.namespace" . }} +spec: + groups: + - name: windows-exporter-relabel.rules + rules: +{{- include "windowsExporter.renamedMetricsRules" . 
| nindent 4 -}} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/rbac.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/rbac.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/rbac.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/servicemonitor.yaml similarity index 88% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/servicemonitor.yaml index a2c2f0b54e2..26ece9b05a2 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/templates/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/templates/servicemonitor.yaml @@ -17,14 +17,11 @@ spec: endpoints: - port: windows-metrics metricRelabelings: +{{- include "windowsExporter.renamedMetricsRelabeling" . 
| nindent 4 -}} - sourceLabels: [__name__] regex: 'wmi_(.*)' replacement: 'windows_$1' targetLabel: __name__ - - sourceLabels: [__name__] - regex: windows_mssql_transactions_active_total - replacement: 'windows_mssql_transactions_active' - targetLabel: __name__ - sourceLabels: [volume, nic] regex: (.*);(.*) separator: '' diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/values.yaml similarity index 91% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/values.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/values.yaml index 6130890bd83..aa1fd197355 100644 --- a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/charts/windowsExporter/values.yaml @@ -13,6 +13,10 @@ global: serviceMonitor: enabled: true +# Configure PrometheusRule that renames existing metrics +prometheusRule: + enabled: true + ## Components scraping metrics from Windows nodes ## clients: @@ -21,7 +25,7 @@ clients: port: 9796 image: repository: rancher/windows_exporter-package - tag: v0.0.1 + tag: v0.0.2 os: "windows" # Specify the IP addresses of nodes that you want to collect metrics from diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/ingress-nginx/nginx.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/ingress-nginx/nginx.json similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/ingress-nginx/nginx.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/ingress-nginx/nginx.json index 347c9eb05c2..d4793ac678d 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/ingress-nginx/nginx.json +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/ingress-nginx/nginx.json @@ -1415,7 +1415,7 @@ 
"multi": false, "name": "ingress", "options": [], - "query": "label_values(nginx_ingress_controller_requests{namespace=~\"$namespace\",controller_class=~\"$controller_class\",controller=~\"$controller\"}, ingress) ", + "query": "label_values(nginx_ingress_controller_requests{namespace=~\"$namespace\",controller_class=~\"$controller_class\",controller_pod=~\"$controller\"}, ingress) ", "refresh": 1, "regex": "", "sort": 2, @@ -1460,4 +1460,4 @@ "title": "NGINX / Ingress Controller", "uid": "nginx", "version": 1 -} +} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/ingress-nginx/request-handling-performance.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/ingress-nginx/request-handling-performance.json similarity index 98% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/ingress-nginx/request-handling-performance.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/ingress-nginx/request-handling-performance.json index 5635ae97645..d0125f0ac9b 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/ingress-nginx/request-handling-performance.json +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/ingress-nginx/request-handling-performance.json @@ -481,7 +481,7 @@ "steppedLine": false, "targets": [ { - "expr": "sum by (path) (rate(nginx_ingress_controller_request_duration_seconds_count{\n ingress = \"$ingress\",\n status =~ \"[4-5].*\"\n}[1m])) / sum by (path) (rate(nginx_ingress_controller_request_duration_seconds_count{\n ingress = \"$ingress\",\n}[1m]))", + "expr": "sum by (path) (rate(nginx_ingress_controller_request_duration_seconds_count{\n ingress =~ \"$ingress\",\n status =~ \"[4-5].*\"\n}[1m])) / sum by (path) (rate(nginx_ingress_controller_request_duration_seconds_count{\n ingress =~ \"$ingress\",\n}[1m]))", "interval": "", "intervalFactor": 1, "legendFormat": "{{ path }}", @@ -573,7 +573,7 @@ 
"steppedLine": false, "targets": [ { - "expr": "sum by (path) (rate(nginx_ingress_controller_response_duration_seconds_sum{ingress = \"$ingress\"}[1m]))", + "expr": "sum by (path) (rate(nginx_ingress_controller_response_duration_seconds_sum{ingress =~ \"$ingress\"}[1m]))", "interval": "", "intervalFactor": 1, "legendFormat": "{{ path }}", @@ -764,7 +764,7 @@ "refId": "D" }, { - "expr": " sum (rate(nginx_ingress_controller_response_size_bucket{\n namespace =~ \"$namespace\",\n ingress =~ \"$ingress\",\n }[1m])) by (le)\n", + "expr": " sum (rate(nginx_ingress_controller_response_size_bucket{\n ingress =~ \"$ingress\",\n }[1m])) by (le)\n", "hide": true, "legendFormat": "{{le}}", "refId": "A" @@ -978,4 +978,4 @@ "title": "NGINX / Request Handling Performance", "uid": "4GFbkOsZk", "version": 1 - } +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/cluster/rancher-cluster-nodes.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/cluster/rancher-cluster-nodes.json similarity index 97% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/cluster/rancher-cluster-nodes.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/cluster/rancher-cluster-nodes.json index b8c1ab7e6f1..b33895a0520 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/cluster/rancher-cluster-nodes.json +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/cluster/rancher-cluster-nodes.json @@ -565,25 +565,25 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_received_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "expr": 
"sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_received_errors_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", "interval": "", "legendFormat": "Receive Errors ({{instance}})", "refId": "A" }, { - "expr": "sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "expr": "sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_received_total_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", "interval": "", "legendFormat": "Receive Total ({{instance}})", "refId": "B" }, { - "expr": "sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_outbound_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "expr": "sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_outbound_errors_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", "interval": "", "legendFormat": "Transmit Errors ({{instance}})", "refId": "C" }, { - "expr": "sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_received_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "expr": 
"sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_received_discarded_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", "interval": "", "legendFormat": "Receive Dropped ({{instance}})", "refId": "D" @@ -696,7 +696,7 @@ "refId": "A" }, { - "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval]) OR rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval]) OR rate(windows_net_packets_received_total_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", "interval": "", "legendFormat": "Receive Total ({{instance}})", "refId": "B" diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/cluster/rancher-cluster.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/cluster/rancher-cluster.json similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/cluster/rancher-cluster.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/cluster/rancher-cluster.json index 29cc9167570..8fccbc24c10 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/cluster/rancher-cluster.json +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/cluster/rancher-cluster.json @@ -551,25 +551,25 @@ "steppedLine": false, "targets": [ { - "expr": "(sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + 
(sum(rate(windows_net_packets_received_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", + "expr": "(sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_errors_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", "interval": "", "legendFormat": "Receive Errors", "refId": "A" }, { - "expr": "(sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", + "expr": "(sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_total_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", "interval": "", "legendFormat": "Receive Total", "refId": "B" }, { - "expr": "(sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_outbound_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", + "expr": "(sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_outbound_errors_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", "interval": "", "legendFormat": "Transmit Errors", "refId": "C" }, { - "expr": "(sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + 
(sum(rate(windows_net_packets_received_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", + "expr": "(sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_discarded_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", "interval": "", "legendFormat": "Receive Dropped", "refId": "D" @@ -679,7 +679,7 @@ "refId": "A" }, { - "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval]) OR rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval]))", + "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval]) OR rate(windows_net_packets_received_total_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval]))", "interval": "", "legendFormat": "Receive Total", "refId": "B" diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/home/rancher-default-home.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/home/rancher-default-home.json similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/home/rancher-default-home.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/home/rancher-default-home.json index 7923d696979..13b153cf8d9 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/home/rancher-default-home.json +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/home/rancher-default-home.json @@ -470,7 +470,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum(kube_node_status_allocatable_cpu_cores{})", + "expr": "sum(kube_node_status_allocatable_cpu_cores{}) OR 
sum(kube_node_status_allocatable{resource=\"cpu\",unit=\"core\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", @@ -654,7 +654,7 @@ "tableColumn": "", "targets": [ { - "expr": "sum (kube_node_status_allocatable_memory_bytes{})", + "expr": "sum(kube_node_status_allocatable_memory_bytes{}) OR sum(kube_node_status_allocatable{resource=\"memory\", unit=\"byte\"})", "interval": "10s", "intervalFactor": 1, "refId": "A", diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/k8s/rancher-etcd-nodes.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/k8s/rancher-etcd-nodes.json similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/k8s/rancher-etcd-nodes.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/k8s/rancher-etcd-nodes.json diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/k8s/rancher-etcd.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/k8s/rancher-etcd.json similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/k8s/rancher-etcd.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/k8s/rancher-etcd.json diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/k8s/rancher-k8s-components-nodes.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/k8s/rancher-k8s-components-nodes.json similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/k8s/rancher-k8s-components-nodes.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/k8s/rancher-k8s-components-nodes.json diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/k8s/rancher-k8s-components.json 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/k8s/rancher-k8s-components.json similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/k8s/rancher-k8s-components.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/k8s/rancher-k8s-components.json diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/nodes/rancher-node-detail.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/nodes/rancher-node-detail.json similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/nodes/rancher-node-detail.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/nodes/rancher-node-detail.json index 0b57efa2ecd..d71bc02b780 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/nodes/rancher-node-detail.json +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/nodes/rancher-node-detail.json @@ -561,25 +561,25 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_received_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "expr": "sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_received_errors_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", "interval": "", "legendFormat": "Receive Errors ({{device}})", "refId": "A" }, { - "expr": "sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", 
instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "expr": "sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_received_total_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", "interval": "", "legendFormat": "Receive Total ({{device}})", "refId": "B" }, { - "expr": "sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_outbound_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "expr": "sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_outbound_errors_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", "interval": "", "legendFormat": "Transmit Errors ({{device}})", "refId": "C" }, { - "expr": "sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_received_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "expr": "sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_received_discarded_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', 
instance=~\"$instance\"}[$__rate_interval])) by (device)", "interval": "", "legendFormat": "Receive Dropped ({{device}})", "refId": "D" @@ -692,7 +692,7 @@ "refId": "A" }, { - "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_net_packets_received_total_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", "interval": "", "legendFormat": "Receive Total ({{device}})", "refId": "B" diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/nodes/rancher-node.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/nodes/rancher-node.json similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/nodes/rancher-node.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/nodes/rancher-node.json index 7324c4164bb..c4b77db64cd 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/nodes/rancher-node.json +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/nodes/rancher-node.json @@ -551,25 +551,25 @@ "steppedLine": false, "targets": [ { - "expr": "(sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", + "expr": 
"(sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_errors_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", "interval": "", "legendFormat": "Receive Errors", "refId": "A" }, { - "expr": "(sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", + "expr": "(sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_total_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", "interval": "", "legendFormat": "Receive Total", "refId": "B" }, { - "expr": "(sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_outbound_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", + "expr": "(sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_outbound_errors_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", "interval": "", "legendFormat": "Transmit Errors", "refId": "C" }, { - "expr": 
"(sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", + "expr": "(sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_discarded_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", "interval": "", "legendFormat": "Receive Dropped", "refId": "D" @@ -679,7 +679,7 @@ "refId": "A" }, { - "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval]))", + "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_net_packets_received_total_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval]))", "interval": "", "legendFormat": "Receive Total", "refId": "B" diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/pods/rancher-pod-containers.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/pods/rancher-pod-containers.json similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/pods/rancher-pod-containers.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/pods/rancher-pod-containers.json diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/pods/rancher-pod.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/pods/rancher-pod.json similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/pods/rancher-pod.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/pods/rancher-pod.json diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/workloads/rancher-workload-pods.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/workloads/rancher-workload-pods.json similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/workloads/rancher-workload-pods.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/workloads/rancher-workload-pods.json diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/workloads/rancher-workload.json b/charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/workloads/rancher-workload.json similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/files/rancher/workloads/rancher-workload.json rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/files/rancher/workloads/rancher-workload.json diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/NOTES.txt b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/NOTES.txt similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/NOTES.txt rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/NOTES.txt diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/_helpers.tpl similarity index 76% rename from 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/_helpers.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/_helpers.tpl index 123cbad6d5c..b47ae096c7b 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/_helpers.tpl +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/_helpers.tpl @@ -46,6 +46,18 @@ e.g. {{ include "call-nested" (list . "grafana" "grafana.fullname") }} {{- end -}} {{- end }} +{{- define "exporter.kubelet.enabled" -}} +{{- if or .Values.kubelet.enabled .Values.hardenedKubelet.enabled .Values.k3sServer.enabled -}} +"true" +{{- end -}} +{{- end }} + +{{- define "exporter.kubeletService.enabled" -}} +{{- if or .Values.hardenedKubelet.enabled .Values.prometheusOperator.kubeletService.enabled .Values.k3sServer.enabled -}} +"true" +{{- end -}} +{{- end }} + {{- define "exporter.kubeControllerManager.jobName" -}} {{- if .Values.k3sServer.enabled -}} k3s-server @@ -70,6 +82,14 @@ kube-proxy {{- end -}} {{- end }} +{{- define "exporter.kubelet.jobName" -}} +{{- if .Values.k3sServer.enabled -}} +k3s-server +{{- else -}} +kubelet +{{- end -}} +{{- end }} + {{- define "kubelet.serviceMonitor.resourcePath" -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} {{- if not (eq .Values.kubelet.serviceMonitor.resourcePath "/metrics/resource/v1alpha1") -}} @@ -153,6 +173,10 @@ The longest name that gets created adds and extra 37 characters, so truncation s {{/* Generate basic labels */}} {{- define "kube-prometheus-stack.labels" }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: "{{ replace "+" "_" .Chart.Version }}" +app.kubernetes.io/part-of: {{ template "kube-prometheus-stack.name" . }} chart: {{ template "kube-prometheus-stack.chartref" . 
}} release: {{ $.Release.Name | quote }} heritage: {{ $.Release.Service | quote }} @@ -197,4 +221,31 @@ Allow the release namespace to be overridden for multi-namespace deployments in {{- else -}} {{- .Release.Namespace -}} {{- end -}} -{{- end -}} \ No newline at end of file +{{- end -}} + +{{/* Allow KubeVersion to be overridden. */}} +{{- define "kube-prometheus-stack.ingress.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersionOverride -}} +{{- end -}} + +{{/* Get Ingress API Version */}} +{{- define "kube-prometheus-stack.ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "kube-prometheus-stack.ingress.kubeVersion" .)) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* Check Ingress stability */}} +{{- define "kube-prometheus-stack.ingress.isStable" -}} + {{- eq (include "kube-prometheus-stack.ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* Check Ingress supports pathType */}} +{{/* pathType was added to networking.k8s.io/v1beta1 in Kubernetes 1.18 */}} +{{- define "kube-prometheus-stack.ingress.supportsPathType" -}} + {{- or (eq (include "kube-prometheus-stack.ingress.isStable" .) "true") (and (eq (include "kube-prometheus-stack.ingress.apiVersion" .) 
"networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "kube-prometheus-stack.ingress.kubeVersion" .))) -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/alertmanager.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/alertmanager.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/alertmanager.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/alertmanager.yaml index 8967c86ff22..43d9954ca0c 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/alertmanager.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/alertmanager.yaml @@ -7,6 +7,10 @@ metadata: labels: app: {{ template "kube-prometheus-stack.name" . }}-alertmanager {{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.alertmanager.annotations }} + annotations: +{{ toYaml .Values.alertmanager.annotations | indent 4 }} +{{- end }} spec: {{- if .Values.alertmanager.alertmanagerSpec.image }} image: {{ template "system_default_registry" . }}{{ .Values.alertmanager.alertmanagerSpec.image.repository }}:{{ .Values.alertmanager.alertmanagerSpec.image.tag }} @@ -89,7 +93,7 @@ spec: labelSelector: matchExpressions: - {key: app, operator: In, values: [alertmanager]} - - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-alertmanager]} + - {key: alertmanager, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . 
}}-alertmanager]} {{- else if eq .Values.alertmanager.alertmanagerSpec.podAntiAffinity "soft" }} podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/cleanupSecret.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/cleanupSecret.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/cleanupSecret.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/cleanupSecret.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/ingress.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/ingress.yaml similarity index 70% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/ingress.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/ingress.yaml index 50fab145566..f337502e97e 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/ingress.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/ingress.yaml @@ -4,11 +4,9 @@ {{- $servicePort := .Values.alertmanager.service.port -}} {{- $routePrefix := list .Values.alertmanager.alertmanagerSpec.routePrefix }} {{- $paths := .Values.alertmanager.ingress.paths | default $routePrefix -}} -{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} -apiVersion: networking.k8s.io/v1beta1 -{{ else }} -apiVersion: extensions/v1beta1 -{{ end -}} +{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "kube-prometheus-stack.ingress.supportsPathType" .) "true" -}} +apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" . 
}} kind: Ingress metadata: name: {{ $serviceName }} @@ -24,7 +22,7 @@ metadata: {{- end }} {{ include "kube-prometheus-stack.labels" . | indent 4 }} spec: - {{- if or (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }} + {{- if $apiIsStable }} {{- if .Values.alertmanager.ingress.ingressClassName }} ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName }} {{- end }} @@ -37,25 +35,39 @@ spec: paths: {{- range $p := $paths }} - path: {{ tpl $p $ }} - {{- if $pathType }} + {{- if and $pathType $ingressSupportsPathType }} pathType: {{ $pathType }} {{- end }} backend: + {{- if $apiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} serviceName: {{ $serviceName }} servicePort: {{ $servicePort }} - {{- end -}} + {{- end }} + {{- end -}} {{- end -}} {{- else }} - http: paths: {{- range $p := $paths }} - path: {{ tpl $p $ }} - {{- if $pathType }} + {{- if and $pathType $ingressSupportsPathType }} pathType: {{ $pathType }} {{- end }} backend: + {{- if $apiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} serviceName: {{ $serviceName }} servicePort: {{ $servicePort }} + {{- end }} {{- end -}} {{- end -}} {{- if .Values.alertmanager.ingress.tls }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/ingressperreplica.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/ingressperreplica.yaml similarity index 76% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/ingressperreplica.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/ingressperreplica.yaml index 3d673b2c81e..f21bf961697 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/ingressperreplica.yaml +++ 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/ingressperreplica.yaml @@ -3,6 +3,8 @@ {{- $count := .Values.alertmanager.alertmanagerSpec.replicas | int -}} {{- $servicePort := .Values.alertmanager.service.port -}} {{- $ingressValues := .Values.alertmanager.ingressPerReplica -}} +{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "kube-prometheus-stack.ingress.supportsPathType" .) "true" -}} apiVersion: v1 kind: List metadata: @@ -11,17 +13,13 @@ metadata: items: {{ range $i, $e := until $count }} - kind: Ingress - {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} - apiVersion: networking.k8s.io/v1beta1 - {{ else }} - apiVersion: extensions/v1beta1 - {{ end -}} + apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" $ }} metadata: name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }} namespace: {{ template "kube-prometheus-stack.namespace" $ }} labels: app: {{ include "kube-prometheus-stack.name" $ }}-alertmanager -{{ include "kube-prometheus-stack.labels" $ | indent 8 }} + {{ include "kube-prometheus-stack.labels" $ | indent 8 }} {{- if $ingressValues.labels }} {{ toYaml $ingressValues.labels | indent 8 }} {{- end }} @@ -30,7 +28,7 @@ items: {{ toYaml $ingressValues.annotations | indent 8 }} {{- end }} spec: - {{- if or ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") ($.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }} + {{- if $apiIsStable }} {{- if $ingressValues.ingressClassName }} ingressClassName: {{ $ingressValues.ingressClassName }} {{- end }} @@ -41,12 +39,19 @@ items: paths: {{- range $p := $ingressValues.paths }} - path: {{ tpl $p $ }} - {{- if $pathType }} + {{- if and $pathType $ingressSupportsPathType }} pathType: {{ $pathType }} {{- end }} backend: + {{- if $apiIsStable }} + service: + name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }} + port: 
+ number: {{ $servicePort }} + {{- else }} serviceName: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }} servicePort: {{ $servicePort }} + {{- end }} {{- end -}} {{- if or $ingressValues.tlsSecretName $ingressValues.tlsSecretPerReplica.enabled }} tls: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/podDisruptionBudget.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/podDisruptionBudget.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/podDisruptionBudget.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/podDisruptionBudget.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/psp-role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/psp-role.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/psp-role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/psp-role.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/psp-rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/psp-rolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/psp-rolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/psp-rolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/psp.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/psp.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/psp.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/secret.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/secret.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/secret.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/secret.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/serviceaccount.yaml similarity index 77% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/serviceaccount.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/serviceaccount.yaml index c5e6e92282b..066c7fc89f8 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/serviceaccount.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/serviceaccount.yaml @@ -6,11 +6,15 @@ metadata: namespace: {{ template "kube-prometheus-stack.namespace" . }} labels: app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-alertmanager + app.kubernetes.io/component: alertmanager {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} {{- if .Values.alertmanager.serviceAccount.annotations }} annotations: {{ toYaml .Values.alertmanager.serviceAccount.annotations | indent 4 }} {{- end }} +{{- if .Values.global.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | indent 2 }} {{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/servicemonitor.yaml similarity index 93% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/servicemonitor.yaml index a699accb8c7..2dc9b86842e 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/servicemonitor.yaml @@ -21,6 +21,9 @@ spec: {{- if .Values.alertmanager.serviceMonitor.interval }} interval: {{ .Values.alertmanager.serviceMonitor.interval }} {{- end }} + {{- if .Values.alertmanager.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.alertmanager.serviceMonitor.proxyUrl}} + {{- end }} {{- if .Values.alertmanager.serviceMonitor.scheme }} scheme: {{ .Values.alertmanager.serviceMonitor.scheme }} {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/serviceperreplica.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/serviceperreplica.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/alertmanager/serviceperreplica.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/alertmanager/serviceperreplica.yaml diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/core-dns/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/core-dns/service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/core-dns/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/core-dns/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/core-dns/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/core-dns/servicemonitor.yaml similarity index 89% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/core-dns/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/core-dns/servicemonitor.yaml index f34549048f6..041707f026b 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/core-dns/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/core-dns/servicemonitor.yaml @@ -21,6 +21,9 @@ spec: {{- if .Values.coreDns.serviceMonitor.interval}} interval: {{ .Values.coreDns.serviceMonitor.interval }} {{- end }} + {{- if .Values.coreDns.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.coreDns.serviceMonitor.proxyUrl}} + {{- end }} bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token {{- if .Values.coreDns.serviceMonitor.metricRelabelings }} metricRelabelings: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-api-server/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-api-server/servicemonitor.yaml similarity index 82% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-api-server/servicemonitor.yaml 
rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-api-server/servicemonitor.yaml index b7ea3817ce9..df28b970f2c 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-api-server/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-api-server/servicemonitor.yaml @@ -13,15 +13,18 @@ spec: {{- if .Values.kubeApiServer.serviceMonitor.interval }} interval: {{ .Values.kubeApiServer.serviceMonitor.interval }} {{- end }} + {{- if .Values.kubeApiServer.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeApiServer.serviceMonitor.proxyUrl}} + {{- end }} port: https scheme: https {{- if .Values.kubeApiServer.serviceMonitor.metricRelabelings }} metricRelabelings: {{ tpl (toYaml .Values.kubeApiServer.serviceMonitor.metricRelabelings | indent 6) . }} {{- end }} -{{- if .Values.kubeApiServer.relabelings }} +{{- if .Values.kubeApiServer.serviceMonitor.relabelings }} relabelings: -{{ toYaml .Values.kubeApiServer.relabelings | indent 6 }} +{{ toYaml .Values.kubeApiServer.serviceMonitor.relabelings | indent 6 }} {{- end }} tlsConfig: caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-controller-manager/endpoints.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-controller-manager/endpoints.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-controller-manager/endpoints.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-controller-manager/endpoints.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-controller-manager/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-controller-manager/service.yaml 
similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-controller-manager/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-controller-manager/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-controller-manager/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-controller-manager/servicemonitor.yaml similarity index 92% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-controller-manager/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-controller-manager/servicemonitor.yaml index 38e2b1970ac..689dc0e312b 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-controller-manager/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-controller-manager/servicemonitor.yaml @@ -22,6 +22,9 @@ spec: interval: {{ .Values.kubeControllerManager.serviceMonitor.interval }} {{- end }} bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeControllerManager.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeControllerManager.serviceMonitor.proxyUrl}} + {{- end }} {{- if .Values.kubeControllerManager.serviceMonitor.https }} scheme: https tlsConfig: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-dns/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-dns/service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-dns/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-dns/service.yaml diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-dns/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-dns/servicemonitor.yaml similarity index 93% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-dns/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-dns/servicemonitor.yaml index 28d06ae8305..923a5bcfa25 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-dns/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-dns/servicemonitor.yaml @@ -22,6 +22,9 @@ spec: interval: {{ .Values.kubeDns.serviceMonitor.interval }} {{- end }} bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeDns.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeDns.serviceMonitor.proxyUrl}} + {{- end }} {{- if .Values.kubeDns.serviceMonitor.dnsmasqMetricRelabelings }} metricRelabelings: {{ tpl (toYaml .Values.kubeDns.serviceMonitor.dnsmasqMetricRelabelings | indent 4) . 
}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-etcd/endpoints.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-etcd/endpoints.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-etcd/endpoints.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-etcd/endpoints.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-etcd/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-etcd/service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-etcd/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-etcd/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-etcd/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-etcd/servicemonitor.yaml similarity index 93% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-etcd/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-etcd/servicemonitor.yaml index d5816f44170..689e1fdc987 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-etcd/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-etcd/servicemonitor.yaml @@ -22,6 +22,9 @@ spec: interval: {{ .Values.kubeEtcd.serviceMonitor.interval }} {{- end }} bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeEtcd.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeEtcd.serviceMonitor.proxyUrl}} + {{- end }} {{- if eq 
.Values.kubeEtcd.serviceMonitor.scheme "https" }} scheme: https tlsConfig: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-proxy/endpoints.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-proxy/endpoints.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-proxy/endpoints.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-proxy/endpoints.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-proxy/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-proxy/service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-proxy/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-proxy/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-proxy/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-proxy/servicemonitor.yaml similarity index 91% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-proxy/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-proxy/servicemonitor.yaml index ed163268221..bc3b7be1dd1 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-proxy/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-proxy/servicemonitor.yaml @@ -22,6 +22,9 @@ spec: interval: {{ .Values.kubeProxy.serviceMonitor.interval }} {{- end }} bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeProxy.serviceMonitor.proxyUrl }} + proxyUrl: 
{{ .Values.kubeProxy.serviceMonitor.proxyUrl}} + {{- end }} {{- if .Values.kubeProxy.serviceMonitor.https }} scheme: https tlsConfig: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-scheduler/endpoints.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-scheduler/endpoints.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-scheduler/endpoints.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-scheduler/endpoints.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-scheduler/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-scheduler/service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-scheduler/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-scheduler/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-scheduler/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-scheduler/servicemonitor.yaml similarity index 92% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-scheduler/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-scheduler/servicemonitor.yaml index 7caef4f581b..a9a454bc47b 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-scheduler/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-scheduler/servicemonitor.yaml @@ -22,6 +22,9 @@ spec: interval: {{ .Values.kubeScheduler.serviceMonitor.interval }} {{- end }} 
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeScheduler.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeScheduler.serviceMonitor.proxyUrl}} + {{- end }} {{- if .Values.kubeScheduler.serviceMonitor.https }} scheme: https tlsConfig: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-state-metrics/serviceMonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-state-metrics/serviceMonitor.yaml similarity index 79% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-state-metrics/serviceMonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-state-metrics/serviceMonitor.yaml index 5b723b214d8..caeaa1e44f9 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kube-state-metrics/serviceMonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kube-state-metrics/serviceMonitor.yaml @@ -14,6 +14,9 @@ spec: {{- if .Values.kubeStateMetrics.serviceMonitor.interval }} interval: {{ .Values.kubeStateMetrics.serviceMonitor.interval }} {{- end }} + {{- if .Values.kubeStateMetrics.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubeStateMetrics.serviceMonitor.proxyUrl}} + {{- end }} honorLabels: true {{- if .Values.kubeStateMetrics.serviceMonitor.metricRelabelings }} metricRelabelings: @@ -22,6 +25,11 @@ spec: {{- if .Values.kubeStateMetrics.serviceMonitor.relabelings }} relabelings: {{ toYaml .Values.kubeStateMetrics.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- if .Values.kubeStateMetrics.serviceMonitor.namespaceOverride }} + namespaceSelector: + matchNames: + - {{ .Values.kubeStateMetrics.serviceMonitor.namespaceOverride }} {{- end }} selector: matchLabels: diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kubelet/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kubelet/servicemonitor.yaml similarity index 81% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kubelet/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kubelet/servicemonitor.yaml index 15811312cca..9d707a5527b 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/kubelet/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/kubelet/servicemonitor.yaml @@ -1,4 +1,7 @@ -{{- if .Values.kubelet.enabled }} +{{- if (and (not .Values.kubelet.enabled) .Values.hardenedKubelet.enabled) }} +{{ required "Cannot set .Values.hardenedKubelet.enabled=true when .Values.kubelet.enabled=false" "" }} +{{- end }} +{{- if (and .Values.kubelet.enabled (not .Values.hardenedKubelet.enabled) (not .Values.k3sServer.enabled)) }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: @@ -15,6 +18,9 @@ spec: {{- if .Values.kubelet.serviceMonitor.interval }} interval: {{ .Values.kubelet.serviceMonitor.interval }} {{- end }} + {{- if .Values.kubelet.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }} + {{- end }} tlsConfig: caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt insecureSkipVerify: true @@ -35,6 +41,9 @@ spec: {{- if .Values.kubelet.serviceMonitor.interval }} interval: {{ .Values.kubelet.serviceMonitor.interval }} {{- end }} + {{- if .Values.kubelet.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }} + {{- end }} {{- if .Values.kubelet.serviceMonitor.scrapeTimeout }} scrapeTimeout: {{ .Values.kubelet.serviceMonitor.scrapeTimeout }} {{- end }} @@ -59,6 +68,9 @@ spec: {{- if .Values.kubelet.serviceMonitor.interval }} interval: {{ 
.Values.kubelet.serviceMonitor.interval }} {{- end }} + {{- if .Values.kubelet.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }} + {{- end }} honorLabels: true tlsConfig: caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt @@ -80,6 +92,9 @@ spec: {{- if .Values.kubelet.serviceMonitor.interval }} interval: {{ .Values.kubelet.serviceMonitor.interval }} {{- end }} + {{- if .Values.kubelet.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }} + {{- end }} honorLabels: true tlsConfig: caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt @@ -99,6 +114,9 @@ spec: {{- if .Values.kubelet.serviceMonitor.interval }} interval: {{ .Values.kubelet.serviceMonitor.interval }} {{- end }} + {{- if .Values.kubelet.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }} + {{- end }} honorLabels: true {{- if .Values.kubelet.serviceMonitor.metricRelabelings }} metricRelabelings: @@ -114,6 +132,9 @@ spec: {{- if .Values.kubelet.serviceMonitor.interval }} interval: {{ .Values.kubelet.serviceMonitor.interval }} {{- end }} + {{- if .Values.kubelet.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }} + {{- end }} honorLabels: true {{- if .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings }} metricRelabelings: @@ -129,6 +150,9 @@ spec: {{- if .Values.kubelet.serviceMonitor.interval }} interval: {{ .Values.kubelet.serviceMonitor.interval }} {{- end }} + {{- if .Values.kubelet.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.kubelet.serviceMonitor.proxyUrl }} + {{- end }} honorLabels: true {{- if .Values.kubelet.serviceMonitor.resourceMetricRelabelings }} metricRelabelings: @@ -147,5 +171,6 @@ spec: - {{ .Values.kubelet.namespace }} selector: matchLabels: + app.kubernetes.io/managed-by: prometheus-operator k8s-app: kubelet {{- end}} diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/node-exporter/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/node-exporter/servicemonitor.yaml similarity index 64% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/node-exporter/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/node-exporter/servicemonitor.yaml index 5ca5f1b75c0..09b6edf7bd3 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/exporters/node-exporter/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/exporters/node-exporter/servicemonitor.yaml @@ -1,4 +1,7 @@ -{{- if .Values.nodeExporter.enabled }} +{{- if (and (not .Values.nodeExporter.enabled) .Values.hardenedNodeExporter.enabled) }} +{{ required "Cannot set .Values.hardenedNodeExporter.enabled=true when .Values.nodeExporter.enabled=false" "" }} +{{- end }} +{{- if (and .Values.nodeExporter.enabled (not .Values.hardenedNodeExporter.enabled)) }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: @@ -13,11 +16,19 @@ spec: matchLabels: app: prometheus-node-exporter release: {{ $.Release.Name }} + {{- if (index .Values "prometheus-node-exporter" "namespaceOverride") }} + namespaceSelector: + matchNames: + - {{ index .Values "prometheus-node-exporter" "namespaceOverride" }} + {{- end }} endpoints: - port: metrics {{- if .Values.nodeExporter.serviceMonitor.interval }} interval: {{ .Values.nodeExporter.serviceMonitor.interval }} {{- end }} + {{- if .Values.nodeExporter.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.nodeExporter.serviceMonitor.proxyUrl}} + {{- end }} {{- if .Values.nodeExporter.serviceMonitor.scrapeTimeout }} scrapeTimeout: {{ .Values.nodeExporter.serviceMonitor.scrapeTimeout }} {{- end }} diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/configmap-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/configmap-dashboards.yaml similarity index 81% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/configmap-dashboards.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/configmap-dashboards.yaml index f11af8285d2..c455f536547 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/configmap-dashboards.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/configmap-dashboards.yaml @@ -1,5 +1,5 @@ -{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} -{{- $files := .Files.Glob "dashboards/*.json" }} +{{- if or (and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled) .Values.grafana.forceDeployDashboards }} +{{- $files := .Files.Glob "dashboards-1.14/*.json" }} {{- if $files }} apiVersion: v1 kind: ConfigMapList diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/configmaps-datasources.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/configmaps-datasources.yaml similarity index 87% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/configmaps-datasources.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/configmaps-datasources.yaml index c6700d84e4d..dc74664fab0 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/configmaps-datasources.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/configmaps-datasources.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.grafana.enabled .Values.grafana.sidecar.datasources.enabled }} +{{- if or (and .Values.grafana.enabled .Values.grafana.sidecar.datasources.enabled) 
.Values.grafana.forceDeployDatasources }} apiVersion: v1 kind: ConfigMap metadata: @@ -20,7 +20,11 @@ data: {{- if .Values.grafana.sidecar.datasources.defaultDatasourceEnabled }} - name: Prometheus type: prometheus + {{- if .Values.grafana.sidecar.datasources.url }} + url: {{ .Values.grafana.sidecar.datasources.url }} + {{- else }} url: http://{{ template "kube-prometheus-stack.fullname" . }}-prometheus:{{ .Values.prometheus.service.port }}/{{ trimPrefix "/" .Values.prometheus.prometheusSpec.routePrefix }} + {{- end }} access: proxy isDefault: true jsonData: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/apiserver.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/apiserver.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/apiserver.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/apiserver.yaml index efed4087352..0e8c7bab679 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/apiserver.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/apiserver.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.kubeApiServer.enabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled .Values.kubeApiServer.enabled }} apiVersion: v1 kind: ConfigMap metadata: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/cluster-total.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/cluster-total.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/cluster-total.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/cluster-total.yaml index fde561c8214..e446089a6ea 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/cluster-total.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/cluster-total.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -1825,14 +1825,14 @@ data: "datasource": "$datasource", "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, "includeAll": false, - "label": "cluster", + "label": null, "multi": false, "name": "cluster", "options": [ ], "query": "label_values(kube_pod_info, cluster)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 0, "tagValuesQuery": "", @@ -1879,4 +1879,4 @@ data: "uid": "ff635a025bcfea7bc3dd4f508990a3e9", "version": 0 } -{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/controller-manager.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/controller-manager.yaml similarity index 93% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/controller-manager.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/controller-manager.yaml index 675cbe618a7..ec7d03b7de7 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/controller-manager.yaml +++ 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/controller-manager.yaml @@ -4,7 +4,7 @@ Do not change in-place! In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} {{- if (include "exporter.kubeControllerManager.enabled" .)}} apiVersion: v1 kind: ConfigMap @@ -108,7 +108,11 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(up{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\"})", + {{- if .Values.k3sServer.enabled }} + "expr": "sum(up{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", metrics_path=\"/metrics\"})", + {{- else }} + "expr": "sum(up{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\"})", + {{- end }} "format": "time_series", "intervalFactor": 2, "legendFormat": "", @@ -177,7 +181,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(workqueue_adds_total{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance, name)", + "expr": "sum(rate(workqueue_adds_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\", instance=~\"$instance\"}[5m])) by (instance, name)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}name{{`}}`}}", @@ -283,7 +287,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(workqueue_depth{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance, name)", + "expr": "sum(rate(workqueue_depth{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance, name)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}name{{`}}`}}", @@ -389,7 +393,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance, name, le))", + "expr": "histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance, name, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}name{{`}}`}}", @@ -609,7 +613,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\", verb=\"POST\"}[5m])) by (verb, url, le))", + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\", instance=~\"$instance\", verb=\"POST\"}[5m])) by (verb, url, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", @@ -715,7 +719,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\", verb=\"GET\"}[5m])) by (verb, url, le))", + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\", verb=\"GET\"}[5m])) by (verb, url, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", @@ -821,7 +825,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "process_resident_memory_bytes{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\",instance=~\"$instance\"}", + "expr": "process_resident_memory_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\",instance=~\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -914,7 +918,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "rate(process_cpu_seconds_total{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\",instance=~\"$instance\"}[5m])", + "expr": "rate(process_cpu_seconds_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\",instance=~\"$instance\"}[5m])", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -1007,7 +1011,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "go_goroutines{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\",instance=~\"$instance\"}", + "expr": "go_goroutines{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\",instance=~\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -1091,6 +1095,32 @@ data: "allValue": null, "current": { + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + }, "datasource": "$datasource", "hide": 0, @@ -1101,7 +1131,7 @@ data: "options": [ ], - "query": "label_values(process_cpu_seconds_total{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\"}, instance)", + "query": "label_values(process_cpu_seconds_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\"}, instance)", "refresh": 2, "regex": "", "sort": 1, diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/etcd.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/etcd.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/etcd.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/etcd.yaml index ac54228e956..282cadafe02 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards/etcd.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/etcd.yaml @@ -1,10 +1,10 @@ {{- /* -Generated from 'etcd' from https://raw.githubusercontent.com/etcd-io/website/master/content/docs/current/op-guide/grafana.json +Generated from 'etcd' from https://raw.githubusercontent.com/etcd-io/website/master/content/en/docs/v3.4/op-guide/grafana.json Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} {{- if (include "exporter.kubeEtcd.enabled" .)}} apiVersion: v1 kind: ConfigMap @@ -29,6 +29,7 @@ data: "editable": true, "gnetId": null, "hideControls": false, + "id": 6, "links": [], "refresh": "10s", "rows": [ @@ -1111,7 +1112,6 @@ data: }, "timezone": "browser", "title": "etcd", - "uid": "c2f4e12cdf69feb95caa41a5a1b423d9", "version": 215 } {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-coredns.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-coredns.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-coredns.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-coredns.yaml index 8e4eaec6108..def05e21660 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-coredns.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-coredns.yaml @@ -1,11 +1,13 @@ {{- /* Added manually, can be changed in-place. 
*/ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.coreDns.enabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled .Values.coreDns.enabled }} apiVersion: v1 kind: ConfigMap metadata: namespace: {{ .Values.grafana.defaultDashboards.namespace }} name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-coredns" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} labels: {{- if $.Values.grafana.sidecar.dashboards.label }} {{ $.Values.grafana.sidecar.dashboards.label }}: "1" diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml similarity index 82% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml index 9639fc15c5d..526fb5872e2 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -163,7 +163,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_cpu_cores{cluster=\"$cluster\"})", + "expr": "sum(namespace_cpu:kube_pod_container_resource_requests:sum{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable{resource=\"cpu\",cluster=\"$cluster\"})", "format": "time_series", "instant": true, "intervalFactor": 2, @@ -247,7 +247,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_cpu_cores{cluster=\"$cluster\"})", + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"cpu\"}) / sum(kube_node_status_allocatable{resource=\"cpu\",cluster=\"$cluster\"})", "format": "time_series", "instant": true, "intervalFactor": 2, @@ -331,7 +331,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"})", + "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"}) / sum(node_memory_MemTotal_bytes{cluster=\"$cluster\"})", "format": "time_series", "instant": true, 
"intervalFactor": 2, @@ -415,7 +415,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"})", + "expr": "sum(namespace_memory:kube_pod_container_resource_requests:sum{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable{resource=\"memory\",cluster=\"$cluster\"})", "format": "time_series", "instant": true, "intervalFactor": 2, @@ -499,7 +499,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"})", + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"memory\"}) / sum(kube_node_status_allocatable{resource=\"memory\",cluster=\"$cluster\"})", "format": "time_series", "instant": true, "intervalFactor": 2, @@ -894,7 +894,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "expr": "sum(namespace_cpu:kube_pod_container_resource_requests:sum{cluster=\"$cluster\"}) by (namespace)", "format": "table", "instant": true, "intervalFactor": 2, @@ -903,7 +903,7 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", "format": "table", "instant": true, "intervalFactor": 2, @@ -912,7 +912,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "expr": 
"sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", "format": "table", "instant": true, "intervalFactor": 2, @@ -921,7 +921,7 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"cpu\"}) by (namespace)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1321,7 +1321,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1330,7 +1330,7 @@ data: "step": 10 }, { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1339,7 +1339,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1348,7 +1348,7 @@ data: "step": 10 }, { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / 
sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", resource=\"memory\"}) by (namespace)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1705,7 +1705,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Current Network Usage", "titleSize": "h6" }, { @@ -1745,7 +1745,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1797,19 +1797,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1843,7 +1831,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1901,7 +1889,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Bandwidth", "titleSize": "h6" }, { @@ -1941,7 +1929,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1993,19 +1981,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -2039,7 +2015,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -2097,7 +2073,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Average Container Bandwidth by Namespace", "titleSize": "h6" }, { @@ -2137,7 +2113,7 @@ data: ], "spaceLength": 10, - "span": 12, + 
"span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -2189,19 +2165,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -2235,7 +2199,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -2293,7 +2257,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Rate of Packets", "titleSize": "h6" }, { @@ -2333,7 +2297,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -2385,13 +2349,99 @@ data: "show": false } ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 19, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] 
+ }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] } ], "repeat": null, "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Rate of Packets Dropped", "titleSize": "h6" }, { @@ -2406,8 +2456,9 @@ data: "dashLength": 10, "dashes": false, "datasource": "$datasource", + "decimals": -1, "fill": 10, - "id": 19, + "id": 20, "legend": { "avg": false, "current": false, @@ -2431,12 +2482,12 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ { - "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "expr": "ceil(sum by(namespace) (rate(container_fs_reads_total{container!=\"\", cluster=\"$cluster\"}[5m]) + rate(container_fs_writes_total{container!=\"\", cluster=\"$cluster\"}[5m])))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}namespace{{`}}`}}", @@ -2449,7 +2500,93 @@ data: ], "timeFrom": null, "timeShift": null, - "title": "Rate of Transmitted Packets Dropped", + "title": "IOPS(Reads+Writes)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 21, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": 
false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by(namespace) (rate(container_fs_reads_bytes_total{container!=\"\", cluster=\"$cluster\"}[5m]) + rate(container_fs_writes_bytes_total{container!=\"\", cluster=\"$cluster\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "ThroughPut(Read+Write)", "tooltip": { "shared": false, "sort": 0, @@ -2489,7 +2626,312 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Storage IO", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 22, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "sort": { + "col": 4, + "desc": true + }, + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "IOPS(Reads)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": -1, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": 
"Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "IOPS(Writes)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": -1, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "IOPS(Reads + Writes)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": -1, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Throughput(Read)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Throughput(Write)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Throughput(Read + Write)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Namespace", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to pods", + "linkUrl": 
"./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell", + "pattern": "namespace", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum by(namespace) (rate(container_fs_reads_total{container!=\"\", cluster=\"$cluster\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum by(namespace) (rate(container_fs_writes_total{container!=\"\", cluster=\"$cluster\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum by(namespace) (rate(container_fs_reads_total{container!=\"\", cluster=\"$cluster\"}[5m]) + rate(container_fs_writes_total{container!=\"\", cluster=\"$cluster\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum by(namespace) (rate(container_fs_reads_bytes_total{container!=\"\", cluster=\"$cluster\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum by(namespace) (rate(container_fs_writes_bytes_total{container!=\"\", cluster=\"$cluster\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum by(namespace) (rate(container_fs_reads_bytes_total{container!=\"\", cluster=\"$cluster\"}[5m]) + rate(container_fs_writes_bytes_total{container!=\"\", cluster=\"$cluster\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + 
"step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Storage IO", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Storage IO - Distribution", "titleSize": "h6" } ], diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml new file mode 100644 index 00000000000..1c02a40bfdd --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml @@ -0,0 +1,2744 @@ +{{- /* +Generated from 'k8s-resources-namespace' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-namespace" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-namespace.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "100px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation (from requests)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation (from limits)", + "tooltip": { + "shared": false, + "sort": 0, + 
"value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation (from requests)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation (from limits)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Headlines", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + 
"percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hiddenSeries": true, + "hideTooltip": true, + "legend": true, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hiddenSeries": true, + "hideTooltip": true, + "legend": true, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.cpu\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - requests", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.cpu\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - limits", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + 
"showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value 
#D", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": 
"sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hiddenSeries": true, + "hideTooltip": true, 
+ "legend": true, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hiddenSeries": true, + "hideTooltip": true, + "legend": true, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.memory\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - requests", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.memory\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - limits", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "$datasource", + "fill": 1, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 
2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Usage (RSS)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Cache)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Swap)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #H", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) by (pod)", + "format": "table", + 
"instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) by (pod) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) by (pod) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "format": "table", + "instant": 
true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 9, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Current Receive Bandwidth", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Current Transmit Bandwidth", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD 
HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Rate of Received Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Received Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": 
"", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Network Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + 
"transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Current Network Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + 
"aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 11, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Bandwidth", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + 
"pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": 
false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Rate of Packets", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + 
"min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 15, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Rate of Packets Dropped", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "decimals": -1, + "fill": 10, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + 
"linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "ceil(sum by(pod) (rate(container_fs_reads_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]) + rate(container_fs_writes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m])))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "IOPS(Reads+Writes)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 17, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]) + rate(container_fs_writes_bytes_total{container!=\"\", 
cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "ThroughPut(Read+Write)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Storage IO", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "sort": { + "col": 4, + "desc": true + }, + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "IOPS(Reads)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": -1, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": 
"IOPS(Writes)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": -1, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "IOPS(Reads + Writes)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": -1, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Throughput(Read)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Throughput(Write)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Throughput(Read + Write)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + 
"type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum by(pod) (rate(container_fs_reads_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum by(pod) (rate(container_fs_writes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum by(pod) (rate(container_fs_reads_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]) + rate(container_fs_writes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum by(pod) (rate(container_fs_writes_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]) + rate(container_fs_writes_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + 
], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Storage IO", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Storage IO - Distribution", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + 
} + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Compute Resources / Namespace (Pods)", + "uid": "85a562078cdf77779eaa1add43ccec1e", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-node.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-node.yaml similarity index 97% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-node.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-node.yaml index ce6628ae585..cd4eca510bc 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-node.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-node.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -321,7 +321,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", node=~\"$node\", resource=\"cpu\"}) by (pod)", "format": "table", "instant": true, "intervalFactor": 2, @@ -330,7 +330,7 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=~\"$node\"}) by (pod) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=~\"$node\"}) by (pod) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", node=~\"$node\", resource=\"cpu\"}) by (pod)", "format": "table", "instant": true, "intervalFactor": 2, @@ -339,7 +339,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", node=~\"$node\", resource=\"cpu\"}) by (pod)", "format": "table", "instant": true, "intervalFactor": 2, @@ -348,7 +348,7 @@ data: "step": 10 }, { - "expr": 
"sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=~\"$node\"}) by (pod) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=~\"$node\"}) by (pod) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", node=~\"$node\", resource=\"cpu\"}) by (pod)", "format": "table", "instant": true, "intervalFactor": 2, @@ -749,7 +749,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", node=~\"$node\", resource=\"memory\"}) by (pod)", "format": "table", "instant": true, "intervalFactor": 2, @@ -758,7 +758,7 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_memory_working_set_bytes{cluster=\"$cluster\", node=~\"$node\",container!=\"\"}) by (pod) / sum(kube_pod_container_resource_requests_memory_bytes{node=~\"$node\"}) by (pod)", + "expr": "sum(node_namespace_pod_container:container_memory_working_set_bytes{cluster=\"$cluster\", node=~\"$node\",container!=\"\"}) by (pod) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", node=~\"$node\", resource=\"memory\"}) by (pod)", "format": "table", "instant": true, "intervalFactor": 2, @@ -767,7 +767,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", node=~\"$node\", resource=\"memory\"}) by (pod)", "format": "table", "instant": true, "intervalFactor": 2, @@ -776,7 +776,7 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_memory_working_set_bytes{cluster=\"$cluster\", node=~\"$node\",container!=\"\"}) by (pod) / 
sum(kube_pod_container_resource_limits_memory_bytes{node=~\"$node\"}) by (pod)", + "expr": "sum(node_namespace_pod_container:container_memory_working_set_bytes{cluster=\"$cluster\", node=~\"$node\",container!=\"\"}) by (pod) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", node=~\"$node\", resource=\"memory\"}) by (pod)", "format": "table", "instant": true, "intervalFactor": 2, @@ -901,7 +901,7 @@ data: ], "query": "label_values(kube_pod_info, cluster)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 1, "tagValuesQuery": "", @@ -928,7 +928,7 @@ data: ], "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, node)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 1, "tagValuesQuery": "", diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml similarity index 82% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml index 40355c6b895..88ac3d5c060 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml @@ -1,15 +1,15 @@ {{- /* -Generated from 'k8s-resources-namespace' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Generated from 'k8s-resources-pod' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: namespace: {{ .Values.grafana.defaultDashboards.namespace }} - name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-namespace" | trunc 63 | trimSuffix "-" }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-pod" | trunc 63 | trimSuffix "-" }} annotations: {{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} labels: @@ -19,7 +19,7 @@ metadata: app: {{ template "kube-prometheus-stack.name" $ }}-grafana {{ include "kube-prometheus-stack.labels" $ | indent 4 }} data: - k8s-resources-namespace.json: |- + k8s-resources-pod.json: |- { "annotations": { "list": [ @@ -37,7 +37,7 @@ data: "rows": [ { "collapse": false, - "height": "100px", + "height": "250px", "panels": [ { "aliasColors": { @@ -47,8 +47,7 @@ data: "dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 1, - "format": "percentunit", + "fill": 10, "id": 1, "legend": { "avg": false, @@ -60,7 +59,7 @@ data: "values": false }, "lines": true, - "linewidth": 1, + "linewidth": 0, "links": [ ], @@ -70,199 +69,67 @@ data: "points": false, "renderer": "flot", "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"})", - "format": "time_series", - "instant": true, - "intervalFactor": 2, - "refId": "A" - } - ], - "thresholds": "70,80", - "timeFrom": null, - "timeShift": null, - "title": "CPU Utilisation (from requests)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "singlestat", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true + "alias": "requests", + "color": "#F2495C", + "fill": 0, + "hideTooltip": true, + "legend": true, + "linewidth": 2, + "stack": false }, { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false + "alias": "limits", + "color": "#FF9830", + "fill": 0, + "hideTooltip": true, + "legend": true, + "linewidth": 2, + "stack": false } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "format": "percentunit", - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - ], "spaceLength": 10, - "span": 3, - "stack": false, + "span": 12, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", 
namespace=\"$namespace\"})", + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", pod=\"$pod\", cluster=\"$cluster\"}) by (container)", "format": "time_series", - "instant": true, "intervalFactor": 2, - "refId": "A" - } - ], - "thresholds": "70,80", - "timeFrom": null, - "timeShift": null, - "title": "CPU Utilisation (from limits)", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "singlestat", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true + "legendFormat": "{{`{{`}}container{{`}}`}}", + "legendLink": null, + "step": 10 }, { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - }, - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "format": "percentunit", - "id": 3, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 3, - "stack": false, - "steppedLine": false, - "targets": [ + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "requests", + "legendLink": null, + "step": 10 + }, { - "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\"})", 
+ "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}\n)\n", "format": "time_series", - "instant": true, "intervalFactor": 2, - "refId": "A" + "legendFormat": "limits", + "legendLink": null, + "step": 10 } ], - "thresholds": "70,80", + "thresholds": [ + + ], "timeFrom": null, "timeShift": null, - "title": "Memory Utilization (from requests)", + "title": "CPU Usage", "tooltip": { "shared": false, "sort": 0, "value_type": "individual" }, - "type": "singlestat", + "type": "graph", "xaxis": { "buckets": null, "mode": "time", @@ -290,7 +157,19 @@ data: "show": false } ] - }, + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ { "aliasColors": { @@ -299,20 +178,19 @@ data: "dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 1, - "format": "percentunit", - "id": 4, + "fill": 10, + "id": 2, "legend": { "avg": false, - "current": false, - "max": false, + "current": true, + "max": true, "min": false, "show": true, "total": false, "values": false }, "lines": true, - "linewidth": 1, + "linewidth": 0, "links": [ ], @@ -325,28 +203,38 @@ data: ], "spaceLength": 10, - "span": 3, - "stack": false, + "span": 12, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\"})", + "expr": "sum(increase(container_cpu_cfs_throttled_periods_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", cluster=\"$cluster\"}[5m])) by (container) /sum(increase(container_cpu_cfs_periods_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\", cluster=\"$cluster\"}[5m])) by (container)", "format": "time_series", - "instant": true, 
"intervalFactor": 2, - "refId": "A" + "legendFormat": "{{`{{`}}container{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 0.25, + "yaxis": "left" } ], - "thresholds": "70,80", "timeFrom": null, "timeShift": null, - "title": "Memory Utilisation (from limits)", + "title": "CPU Throttling", "tooltip": { "shared": false, "sort": 0, "value_type": "individual" }, - "type": "singlestat", + "type": "graph", "xaxis": { "buckets": null, "mode": "time", @@ -358,10 +246,10 @@ data: }, "yaxes": [ { - "format": "short", + "format": "percentunit", "label": null, "logBase": 1, - "max": null, + "max": 1, "min": 0, "show": true }, @@ -379,8 +267,8 @@ data: "repeat": null, "repeatIteration": null, "repeatRowId": null, - "showTitle": false, - "title": "Headlines", + "showTitle": true, + "title": "CPU Throttling", "titleSize": "h6" }, { @@ -395,8 +283,8 @@ data: "dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 10, - "id": 5, + "fill": 1, + "id": 3, "legend": { "avg": false, "current": false, @@ -407,7 +295,7 @@ data: "values": false }, "lines": true, - "linewidth": 0, + "linewidth": 1, "links": [ ], @@ -417,151 +305,18 @@ data: "points": false, "renderer": "flot", "seriesOverrides": [ - { - "alias": "quota - requests", - "color": "#F2495C", - "dashes": true, - "fill": 0, - "hideTooltip": true, - "legend": false, - "linewidth": 2, - "stack": false - }, - { - "alias": "quota - limits", - "color": "#FF9830", - "dashes": true, - "fill": 0, - "hideTooltip": true, - "legend": false, - "linewidth": 2, - "stack": false - } + ], "spaceLength": 10, "span": 12, - "stack": true, + "stack": false, "steppedLine": false, - "targets": [ + "styles": [ { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": 
"{{`{{`}}pod{{`}}`}}", - "legendLink": null, - "step": 10 - }, - { - "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.cpu\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "quota - requests", - "legendLink": null, - "step": 10 - }, - { - "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.cpu\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "quota - limits", - "legendLink": null, - "step": 10 - } - ], - "thresholds": [ - - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU Usage", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [ - - ] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "CPU Usage", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ - { - "aliasColors": { - - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "id": 6, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [ - - ], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - - ], - "spaceLength": 10, - "span": 12, - "stack": false, - "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" + 
"alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" }, { "alias": "CPU Usage", @@ -659,18 +414,18 @@ data: "unit": "percentunit" }, { - "alias": "Pod", + "alias": "Container", "colorMode": null, "colors": [ ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, - "link": true, + "link": false, "linkTargetBlank": false, "linkTooltip": "Drill down", - "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", - "pattern": "pod", + "linkUrl": "", + "pattern": "container", "thresholds": [ ], @@ -695,7 +450,7 @@ data: ], "targets": [ { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -704,7 +459,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -713,7 +468,7 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", 
resource=\"cpu\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -722,7 +477,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -731,7 +486,7 @@ data: "step": 10 }, { - "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -802,7 +557,7 @@ data: "dashes": false, "datasource": "$datasource", "fill": 10, - "id": 7, + "id": 4, "legend": { "avg": false, "current": false, @@ -824,22 +579,22 @@ data: "renderer": "flot", "seriesOverrides": [ { - "alias": "quota - requests", + "alias": "requests", "color": "#F2495C", "dashes": true, "fill": 0, "hideTooltip": true, - "legend": false, + "legend": true, "linewidth": 2, "stack": false }, { - "alias": "quota - limits", + "alias": "limits", "color": "#FF9830", "dashes": true, "fill": 0, "hideTooltip": true, - "legend": false, + "legend": true, "linewidth": 2, "stack": false } @@ -850,26 +605,26 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}) by (pod)", + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", 
namespace=\"$namespace\", pod=\"$pod\", container!=\"\", image!=\"\"}) by (container)", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendFormat": "{{`{{`}}container{{`}}`}}", "legendLink": null, "step": 10 }, { - "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.memory\"})", + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"}\n)\n", "format": "time_series", "intervalFactor": 2, - "legendFormat": "quota - requests", + "legendFormat": "requests", "legendLink": null, "step": 10 }, { - "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.memory\"})", + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"}\n)\n", "format": "time_series", "intervalFactor": 2, - "legendFormat": "quota - limits", + "legendFormat": "limits", "legendLink": null, "step": 10 } @@ -879,7 +634,7 @@ data: ], "timeFrom": null, "timeShift": null, - "title": "Memory Usage (w/o cache)", + "title": "Memory Usage (WSS)", "tooltip": { "shared": false, "sort": 0, @@ -935,7 +690,7 @@ data: "dashes": false, "datasource": "$datasource", "fill": 1, - "id": 8, + "id": 5, "legend": { "avg": false, "current": false, @@ -970,7 +725,7 @@ data: "type": "hidden" }, { - "alias": "Memory Usage", + "alias": "Memory Usage (WSS)", "colorMode": null, "colors": [ @@ -1122,18 +877,18 @@ data: "unit": "bytes" }, { - "alias": "Pod", + "alias": "Container", "colorMode": null, "colors": [ ], "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, - "link": true, + "link": false, "linkTargetBlank": false, "linkTooltip": "Drill down", - "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", - 
"pattern": "pod", + "linkUrl": "", + "pattern": "container", "thresholds": [ ], @@ -1158,7 +913,7 @@ data: ], "targets": [ { - "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) by (pod)", + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"\", image!=\"\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1167,7 +922,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "expr": "sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1176,7 +931,7 @@ data: "step": 10 }, { - "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) by (pod) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\"}) by (pod)", + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", image!=\"\"}) by (container) / sum(kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1185,7 +940,7 @@ data: "step": 10 }, { - "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "expr": "sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1194,7 +949,7 @@ data: "step": 10 }, { - "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", 
namespace=\"$namespace\",container!=\"\", image!=\"\"}) by (pod) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\"}) by (pod)", + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"\", image!=\"\"}) by (container) / sum(kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1203,7 +958,7 @@ data: "step": 10 }, { - "expr": "sum(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "expr": "sum(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1212,7 +967,7 @@ data: "step": 10 }, { - "expr": "sum(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "expr": "sum(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1221,7 +976,7 @@ data: "step": 10 }, { - "expr": "sum(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "expr": "sum(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", "format": "table", "instant": true, "intervalFactor": 2, @@ -1291,8 +1046,8 @@ data: "dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 1, - "id": 9, + "fill": 10, + "id": 6, "interval": "1m", "legend": { "avg": false, @@ -1304,7 +1059,7 @@ data: "values": false }, "lines": true, - "linewidth": 1, + "linewidth": 0, "links": [ ], @@ -1317,218 +1072,289 @@ data: ], "spaceLength": 10, - "span": 12, - 
"stack": false, + "span": 6, + "stack": true, "steppedLine": false, - "styles": [ - { - "alias": "Time", - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "pattern": "Time", - "type": "hidden" - }, + "targets": [ { - "alias": "Current Receive Bandwidth", - "colorMode": null, - "colors": [ + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #A", - "thresholds": [ + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ - ], - "type": "number", - "unit": "Bps" - }, + ] + }, + "yaxes": [ { - "alias": "Current Transmit Bandwidth", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #B", - "thresholds": [ - - ], - "type": "number", - "unit": "Bps" + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true }, { - "alias": "Rate of Received Packets", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #C", - "thresholds": [ + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { - ], - "type": "number", - "unit": "pps" - 
}, - { - "alias": "Rate of Transmitted Packets", - "colorMode": null, - "colors": [ + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 7, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #D", - "thresholds": [ + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ - ], - "type": "number", - "unit": "pps" - }, + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ { - "alias": "Rate of Received Packets Dropped", - "colorMode": null, - "colors": [ + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - "linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #E", - "thresholds": [ + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ - ], - "type": "number", - "unit": "pps" - }, + ] + }, + "yaxes": [ { - "alias": "Rate of Transmitted Packets Dropped", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": false, - 
"linkTargetBlank": false, - "linkTooltip": "Drill down", - "linkUrl": "", - "pattern": "Value #F", - "thresholds": [ - - ], - "type": "number", - "unit": "pps" + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true }, { - "alias": "Pod", - "colorMode": null, - "colors": [ - - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "link": true, - "linkTargetBlank": false, - "linkTooltip": "Drill down to pods", - "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", - "pattern": "pod", - "thresholds": [ + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Bandwidth", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { - ], - "type": "number", - "unit": "short" - }, - { - "alias": "", - "colorMode": null, - "colors": [ + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 8, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ - ], - "dateFormat": "YYYY-MM-DD HH:mm:ss", - "decimals": 2, - "pattern": "/.*/", - "thresholds": [ + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ - ], - "type": "string", - "unit": "short" - } ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, "targets": [ { - "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", - "format": "table", - "instant": true, - 
"intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 10 - }, - { - "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "B", - "step": 10 - }, - { - "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", - "format": "table", - "instant": true, + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", + "format": "time_series", "intervalFactor": 2, - "legendFormat": "", - "refId": "C", + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, "step": 10 - }, + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ { - "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "D", - "step": 10 + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true }, { - "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", - "format": "table", - "instant": true, - "intervalFactor": 2, - "legendFormat": "", - "refId": "E", - "step": 10 - }, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": 
false, + "datasource": "$datasource", + "fill": 10, + "id": 9, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ { - "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", - "format": "table", - "instant": true, + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", + "format": "time_series", "intervalFactor": 2, - "legendFormat": "", - "refId": "F", + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, "step": 10 } ], @@ -1537,14 +1363,13 @@ data: ], "timeFrom": null, "timeShift": null, - "title": "Current Network Usage", + "title": "Rate of Transmitted Packets", "tooltip": { "shared": false, "sort": 0, "value_type": "individual" }, - "transform": "table", - "type": "table", + "type": "graph", "xaxis": { "buckets": null, "mode": "time", @@ -1556,7 +1381,7 @@ data: }, "yaxes": [ { - "format": "short", + "format": "Bps", "label": null, "logBase": 1, "max": null, @@ -1578,7 +1403,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Rate of Packets", "titleSize": "h6" }, { @@ -1595,6 +1420,7 @@ data: "datasource": "$datasource", "fill": 10, "id": 10, + "interval": "1m", "legend": { "avg": false, "current": false, @@ -1618,12 +1444,12 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ { - "expr": 
"sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}pod{{`}}`}}", @@ -1636,7 +1462,7 @@ data: ], "timeFrom": null, "timeShift": null, - "title": "Receive Bandwidth", + "title": "Rate of Received Packets Dropped", "tooltip": { "shared": false, "sort": 0, @@ -1670,19 +1496,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1693,6 +1507,7 @@ data: "datasource": "$datasource", "fill": 10, "id": 11, + "interval": "1m", "legend": { "avg": false, "current": false, @@ -1716,12 +1531,12 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ { - "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}pod{{`}}`}}", @@ -1734,7 +1549,7 @@ data: ], "timeFrom": null, "timeShift": null, - "title": "Transmit Bandwidth", + "title": "Rate of Transmitted Packets Dropped", "tooltip": { "shared": false, "sort": 0, @@ -1774,7 +1589,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Rate of Packets Dropped", "titleSize": "h6" }, { @@ -1789,6 +1604,7 @@ data: "dashLength": 10, "dashes": false, "datasource": "$datasource", + "decimals": -1, "fill": 10, 
"id": 12, "legend": { @@ -1814,15 +1630,23 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ { - "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "expr": "ceil(sum by(pod) (rate(container_fs_reads_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[5m])))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendFormat": "Reads", + "legendLink": null, + "step": 10 + }, + { + "expr": "ceil(sum by(pod) (rate(container_fs_writes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[5m])))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Writes", "legendLink": null, "step": 10 } @@ -1832,7 +1656,101 @@ data: ], "timeFrom": null, "timeShift": null, - "title": "Rate of Received Packets", + "title": "IOPS", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + 
"targets": [ + { + "expr": "sum by(pod) (rate(container_fs_reads_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Reads", + "legendLink": null, + "step": 10 + }, + { + "expr": "sum by(pod) (rate(container_fs_writes_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Writes", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "ThroughPut", "tooltip": { "shared": false, "sort": 0, @@ -1872,7 +1790,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Storage IO - Distribution(Pod - Read & Writes)", "titleSize": "h6" }, { @@ -1887,8 +1805,9 @@ data: "dashLength": 10, "dashes": false, "datasource": "$datasource", + "decimals": -1, "fill": 10, - "id": 13, + "id": 14, "legend": { "avg": false, "current": false, @@ -1912,15 +1831,15 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ { - "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "expr": "ceil(sum by(container) (rate(container_fs_reads_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m]) + rate(container_fs_writes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m])))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendFormat": "{{`{{`}}container{{`}}`}}", "legendLink": null, "step": 10 } @@ -1930,7 +1849,7 @@ data: ], "timeFrom": null, "timeShift": null, - "title": "Rate of Transmitted Packets", + "title": "IOPS(Reads+Writes)", "tooltip": { "shared": false, "sort": 0, @@ -1948,7 +1867,7 @@ data: 
}, "yaxes": [ { - "format": "Bps", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -1964,19 +1883,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1986,7 +1893,7 @@ data: "dashes": false, "datasource": "$datasource", "fill": 10, - "id": 14, + "id": 15, "legend": { "avg": false, "current": false, @@ -2010,15 +1917,15 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ { - "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "expr": "sum by(container) (rate(container_fs_reads_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m]) + rate(container_fs_writes_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m]))", "format": "time_series", "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendFormat": "{{`{{`}}container{{`}}`}}", "legendLink": null, "step": 10 } @@ -2028,7 +1935,7 @@ data: ], "timeFrom": null, "timeShift": null, - "title": "Rate of Received Packets Dropped", + "title": "ThroughPut(Read+Write)", "tooltip": { "shared": false, "sort": 0, @@ -2068,7 +1975,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Storage IO - Distribution(Containers)", "titleSize": "h6" }, { @@ -2083,8 +1990,8 @@ data: "dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 10, - "id": 15, + "fill": 1, + "id": 16, "legend": { "avg": false, "current": false, @@ -2095,7 +2002,7 @@ data: "values": false }, "lines": true, - "linewidth": 0, + "linewidth": 1, "links": [ ], @@ -2107,17 +2014,223 @@ data: "seriesOverrides": [ ], + "sort": { 
+ "col": 4, + "desc": true + }, "spaceLength": 10, "span": 12, - "stack": true, + "stack": false, "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "IOPS(Reads)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": -1, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "IOPS(Writes)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": -1, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "IOPS(Reads + Writes)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": -1, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Throughput(Read)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Throughput(Write)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Throughput(Read + Write)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + 
"linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Container", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "container", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], "targets": [ { - "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", - "format": "time_series", + "expr": "sum by(container) (rate(container_fs_reads_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m]))", + "format": "table", + "instant": true, "intervalFactor": 2, - "legendFormat": "{{`{{`}}pod{{`}}`}}", - "legendLink": null, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum by(container) (rate(container_fs_writes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum by(container) (rate(container_fs_reads_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m]) + rate(container_fs_writes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum by(container) (rate(container_fs_reads_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", 
pod=\"$pod\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum by(container) (rate(container_fs_writes_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum by(container) (rate(container_fs_reads_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m]) + rate(container_fs_writes_bytes_total{container!=\"\", cluster=\"$cluster\",namespace=~\"$namespace\", pod=\"$pod\"}[5m]))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", "step": 10 } ], @@ -2126,13 +2239,14 @@ data: ], "timeFrom": null, "timeShift": null, - "title": "Rate of Transmitted Packets Dropped", + "title": "Current Storage IO", "tooltip": { "shared": false, "sort": 0, "value_type": "individual" }, - "type": "graph", + "transform": "table", + "type": "table", "xaxis": { "buckets": null, "mode": "time", @@ -2144,7 +2258,7 @@ data: }, "yaxes": [ { - "format": "Bps", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -2166,7 +2280,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Storage IO - Distribution", "titleSize": "h6" } ], @@ -2209,7 +2323,7 @@ data: ], "query": "label_values(kube_pod_info, cluster)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 1, "tagValuesQuery": "", @@ -2236,7 +2350,34 @@ data: ], "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", - "refresh": 1, + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": 0, + 
"includeAll": false, + "label": null, + "multi": false, + "name": "pod", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=\"$namespace\"}, pod)", + "refresh": 2, "regex": "", "sort": 1, "tagValuesQuery": "", @@ -2279,8 +2420,8 @@ data: ] }, "timezone": "UTC", - "title": "Kubernetes / Compute Resources / Namespace (Pods)", - "uid": "85a562078cdf77779eaa1add43ccec1e", + "title": "Kubernetes / Compute Resources / Pod", + "uid": "6581e46e4e5c7ba40a07646395ef7b23", "version": 0 } {{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml index 931934f23f0..1b1bb7e99f6 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -321,7 +321,7 @@ data: "step": 10 }, { - "expr": "sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -330,7 +330,7 @@ data: "step": 10 }, { - "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n 
group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -339,7 +339,7 @@ data: "step": 10 }, { - "expr": "sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -348,7 +348,7 @@ data: "step": 10 }, { - "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) 
namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -692,7 +692,7 @@ data: "step": 10 }, { - "expr": "sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ 
-701,7 +701,7 @@ data: "step": 10 }, { - "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -710,7 +710,7 @@ data: "step": 10 }, { - "expr": "sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n * on(namespace,pod)\n group_left(workload, 
workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -719,7 +719,7 @@ data: "step": 10 }, { - "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -1076,7 +1076,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Current Network Usage", "titleSize": "h6" }, { @@ -1116,7 +1116,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1168,19 +1168,7 
@@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1214,7 +1202,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1272,7 +1260,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Bandwidth", "titleSize": "h6" }, { @@ -1312,7 +1300,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1364,19 +1352,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1410,7 +1386,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1468,7 +1444,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Average Container Bandwidth by Pod", "titleSize": "h6" }, { @@ -1508,7 +1484,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1560,19 +1536,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1606,7 +1570,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1664,7 +1628,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Rate of Packets", "titleSize": "h6" }, { @@ -1704,7 +1668,7 @@ data: ], 
"spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1756,19 +1720,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1802,7 +1754,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1860,7 +1812,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Rate of Packets Dropped", "titleSize": "h6" } ], @@ -1903,7 +1855,7 @@ data: ], "query": "label_values(kube_pod_info, cluster)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 1, "tagValuesQuery": "", @@ -1930,7 +1882,7 @@ data: ], "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 1, "tagValuesQuery": "", @@ -1957,7 +1909,7 @@ data: ], "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\"}, workload)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 1, "tagValuesQuery": "", @@ -1984,7 +1936,7 @@ data: ], "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\"}, workload_type)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 1, "tagValuesQuery": "", diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml index dd3b2506545..fe33c10c81b 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml @@ -4,7 +4,7 @@ Do not change in-place! In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -74,8 +74,9 @@ data: "color": "#F2495C", "dashes": true, "fill": 0, + "hiddenSeries": true, "hideTooltip": true, - "legend": false, + "legend": true, "linewidth": 2, "stack": false }, @@ -84,8 +85,9 @@ data: "color": "#FF9830", "dashes": true, "fill": 0, + "hiddenSeries": true, "hideTooltip": true, - "legend": false, + "legend": true, "linewidth": 2, "stack": false } @@ -403,7 +405,7 @@ data: "step": 10 }, { - "expr": "sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", 
namespace=\"$namespace\", resource=\"cpu\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -412,7 +414,7 @@ data: "step": 10 }, { - "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -421,7 +423,7 @@ data: "step": 10 }, { - "expr": "sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) 
namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -430,7 +432,7 @@ data: "step": 10 }, { - "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -527,8 +529,9 @@ 
data: "color": "#F2495C", "dashes": true, "fill": 0, + "hiddenSeries": true, "hideTooltip": true, - "legend": false, + "legend": true, "linewidth": 2, "stack": false }, @@ -537,8 +540,9 @@ data: "color": "#FF9830", "dashes": true, "fill": 0, + "hiddenSeries": true, "hideTooltip": true, - "legend": false, + "legend": true, "linewidth": 2, "stack": false } @@ -856,7 +860,7 @@ data: "step": 10 }, { - "expr": "sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "expr": "sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -865,7 +869,7 @@ data: "step": 10 }, { - "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n 
group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -874,7 +878,7 @@ data: "step": 10 }, { - "expr": "sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "expr": "sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -883,7 +887,7 @@ data: "step": 10 }, { - "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", 
workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits{cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", "format": "table", "instant": true, "intervalFactor": 2, @@ -1259,7 +1263,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Current Network Usage", "titleSize": "h6" }, { @@ -1299,7 +1303,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1351,19 +1355,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1397,7 +1389,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1455,7 +1447,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Bandwidth", "titleSize": "h6" }, { @@ -1495,7 +1487,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1547,19 +1539,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - 
"height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1593,7 +1573,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1651,7 +1631,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Average Container Bandwidth by Workload", "titleSize": "h6" }, { @@ -1691,7 +1671,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1743,19 +1723,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1789,7 +1757,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1847,7 +1815,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Rate of Packets", "titleSize": "h6" }, { @@ -1887,7 +1855,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -1939,19 +1907,7 @@ data: "show": false } ] - } - ], - "repeat": null, - "repeatIteration": null, - "repeatRowId": null, - "showTitle": true, - "title": "Network", - "titleSize": "h6" - }, - { - "collapse": false, - "height": "250px", - "panels": [ + }, { "aliasColors": { @@ -1985,7 +1941,7 @@ data: ], "spaceLength": 10, - "span": 12, + "span": 6, "stack": true, "steppedLine": false, "targets": [ @@ -2043,7 +1999,7 @@ data: "repeatIteration": null, "repeatRowId": null, "showTitle": true, - "title": "Network", + "title": "Rate of Packets Dropped", "titleSize": "h6" } ], @@ -2072,28 +2028,23 @@ data: }, { "allValue": null, - "auto": false, - "auto_count": 30, - "auto_min": "10s", "current": { - "text": "deployment", - "value": "deployment" + "text": "", + "value": "" }, "datasource": 
"$datasource", - "definition": "label_values(namespace_workload_pod:kube_pod_owner:relabel{namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", - "hide": 0, + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, "includeAll": false, "label": null, "multi": false, - "name": "type", + "name": "cluster", "options": [ ], - "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", - "refresh": 1, + "query": "label_values(kube_pod_info, cluster)", + "refresh": 2, "regex": "", - "skipUrlSync": false, - "sort": 0, + "sort": 1, "tagValuesQuery": "", "tags": [ @@ -2104,23 +2055,28 @@ data: }, { "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", "current": { - "text": "", - "value": "" + "text": "deployment", + "value": "deployment" }, "datasource": "$datasource", - "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "definition": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", + "hide": 0, "includeAll": false, "label": null, "multi": false, - "name": "cluster", + "name": "type", "options": [ ], - "query": "label_values(kube_pod_info, cluster)", - "refresh": 1, + "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", + "refresh": 2, "regex": "", - "sort": 1, + "skipUrlSync": false, + "sort": 0, "tagValuesQuery": "", "tags": [ @@ -2145,7 +2101,7 @@ data: ], "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 1, "tagValuesQuery": "", diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/kubelet.yaml 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/kubelet.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/kubelet.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/kubelet.yaml index f72ff58752f..b70e7191936 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/kubelet.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/kubelet.yaml @@ -4,7 +4,8 @@ Do not change in-place! In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.kubelet.enabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} +{{- if (include "exporter.kubelet.enabled" .) }} apiVersion: v1 kind: ConfigMap metadata: @@ -107,7 +108,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(up{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\"})", + "expr": "sum(kubelet_node_name{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "", @@ -115,7 +116,7 @@ data: } ], "thresholds": "", - "title": "Up", + "title": "Running Kubelets", "tooltip": { "shared": false }, @@ -191,7 +192,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(kubelet_running_pods{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}) OR sum(kubelet_running_pod_count{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"})", + "expr": "sum(kubelet_running_pods{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\"}) OR sum(kubelet_running_pod_count{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -275,7 +276,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(kubelet_running_containers{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}) OR sum(kubelet_running_container_count{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"})", + "expr": "sum(kubelet_running_containers{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\"}) OR sum(kubelet_running_container_count{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\", instance=~\"$instance\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -359,7 +360,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(volume_manager_total_volumes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\", state=\"actual_state_of_world\"})", + "expr": "sum(volume_manager_total_volumes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\", state=\"actual_state_of_world\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -443,7 +444,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(volume_manager_total_volumes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\",state=\"desired_state_of_world\"})", + "expr": "sum(volume_manager_total_volumes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\",state=\"desired_state_of_world\"})", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -527,7 +528,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(rate(kubelet_node_config_error{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m]))", + "expr": "sum(rate(kubelet_node_config_error{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -609,7 +610,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(kubelet_runtime_operations_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (operation_type, instance)", + "expr": "sum(rate(kubelet_runtime_operations_total{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (operation_type, instance)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_type{{`}}`}}", @@ -702,7 +703,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(kubelet_runtime_operations_errors_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_type)", + "expr": "sum(rate(kubelet_runtime_operations_errors_total{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_type)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_type{{`}}`}}", @@ -808,7 +809,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(kubelet_runtime_operations_duration_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_type, le))", + "expr": "histogram_quantile(0.99, sum(rate(kubelet_runtime_operations_duration_seconds_bucket{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_type, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_type{{`}}`}}", @@ -914,14 +915,14 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(kubelet_pod_start_duration_seconds_count{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance)", + "expr": "sum(rate(kubelet_pod_start_duration_seconds_count{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} pod", "refId": "A" }, { - "expr": "sum(rate(kubelet_pod_worker_duration_seconds_count{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance)", + "expr": "sum(rate(kubelet_pod_worker_duration_seconds_count{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} worker", @@ -1014,14 +1015,14 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(kubelet_pod_start_duration_seconds_count{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(kubelet_pod_start_duration_seconds_count{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} pod", "refId": "A" }, { - "expr": "histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} worker", @@ -1129,7 +1130,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(storage_operation_duration_seconds_count{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_name, volume_plugin)", + "expr": "sum(rate(storage_operation_duration_seconds_count{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_name, volume_plugin)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_name{{`}}`}} {{`{{`}}volume_plugin{{`}}`}}", @@ -1224,7 +1225,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(storage_operation_errors_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_name, volume_plugin)", + "expr": "sum(rate(storage_operation_errors_total{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_name, volume_plugin)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_name{{`}}`}} {{`{{`}}volume_plugin{{`}}`}}", @@ -1332,7 +1333,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(storage_operation_duration_seconds_bucket{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, operation_name, volume_plugin, le))", + "expr": "histogram_quantile(0.99, sum(rate(storage_operation_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, operation_name, volume_plugin, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_name{{`}}`}} {{`{{`}}volume_plugin{{`}}`}}", @@ -1438,7 +1439,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(kubelet_cgroup_manager_duration_seconds_count{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, operation_type)", + "expr": "sum(rate(kubelet_cgroup_manager_duration_seconds_count{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, operation_type)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}operation_type{{`}}`}}", @@ -1531,7 +1532,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(kubelet_cgroup_manager_duration_seconds_bucket{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, operation_type, le))", + "expr": "histogram_quantile(0.99, sum(rate(kubelet_cgroup_manager_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, operation_type, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_type{{`}}`}}", @@ -1638,7 +1639,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(kubelet_pleg_relist_duration_seconds_count{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance)", + "expr": "sum(rate(kubelet_pleg_relist_duration_seconds_count{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -1731,7 +1732,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_interval_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_interval_seconds_bucket{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -1837,7 +1838,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -1943,28 +1944,28 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"2..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"2..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "2xx", "refId": "A" }, { - "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"3..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"3..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "3xx", "refId": "B" }, { - "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"4..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"4..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "4xx", "refId": "C" }, { - "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"5..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"5..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "5xx", @@ -2070,7 +2071,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, verb, url, le))", + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, verb, url, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", @@ -2176,7 +2177,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "process_resident_memory_bytes{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}", + "expr": "process_resident_memory_bytes{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\",instance=~\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -2269,7 +2270,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "rate(process_cpu_seconds_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])", + "expr": "rate(process_cpu_seconds_total{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -2362,7 +2363,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "go_goroutines{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}", + "expr": "go_goroutines{cluster=\"$cluster\",job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\",instance=~\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -2482,7 +2483,7 @@ data: "options": [ ], - "query": "label_values(kubelet_runtime_operations_total{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\"}, instance)", + "query": "label_values(kubelet_runtime_operations_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\"}, instance)", "refresh": 2, "regex": "", "sort": 1, @@ -2530,4 +2531,5 @@ data: "uid": "3138fa155d5915769fbded898ac09fd9", "version": 0 } +{{- end }} {{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/namespace-by-pod.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/namespace-by-pod.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/namespace-by-pod.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/namespace-by-pod.yaml index 20def0a8df6..e7ba030564f 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/namespace-by-pod.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/namespace-by-pod.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -1295,14 +1295,14 @@ data: "datasource": "$datasource", "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, "includeAll": false, - "label": "cluster", + "label": null, "multi": false, "name": "cluster", "options": [ ], "query": "label_values(kube_pod_info, cluster)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 0, "tagValuesQuery": "", @@ -1333,7 +1333,7 @@ data: ], "query": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", - "refresh": 1, + "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 1, @@ -1461,4 +1461,4 @@ data: "uid": "8b7a8b326d7a6f1f04244066368c67af", "version": 0 } -{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/namespace-by-workload.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/namespace-by-workload.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/namespace-by-workload.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/namespace-by-workload.yaml index adecffa0976..00b8fc73ec0 100644 
--- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/namespace-by-workload.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/namespace-by-workload.yaml @@ -4,7 +4,7 @@ Do not change in-place! In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -1535,14 +1535,14 @@ data: "datasource": "$datasource", "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, "includeAll": false, - "label": "cluster", + "label": null, "multi": false, "name": "cluster", "options": [ ], "query": "label_values(kube_pod_info, cluster)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 0, "tagValuesQuery": "", @@ -1573,7 +1573,7 @@ data: ], "query": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", - "refresh": 1, + "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 1, @@ -1605,7 +1605,7 @@ data: ], "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", - "refresh": 1, + "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 0, @@ -1733,4 +1733,4 @@ data: "uid": "bbb2a765a623ae38130206c7d94a160f", "version": 0 } -{{- end }} +{{- end }} \ No newline at end of file diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml index 7ef72f97a60..4d4d8072539 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml @@ -4,7 +4,7 @@ Do not change in-place! In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.nodeExporter.enabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled .Values.nodeExporter.enabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -77,7 +77,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "(\n instance:node_cpu_utilisation:rate1m{job=\"node-exporter\"}\n*\n instance:node_num_cpu:sum{job=\"node-exporter\"}\n)\n/ scalar(sum(instance:node_num_cpu:sum{job=\"node-exporter\"}))\n", + "expr": "(\n instance:node_cpu_utilisation:rate5m{job=\"node-exporter\"}\n*\n 
instance:node_num_cpu:sum{job=\"node-exporter\"}\n)\n/ scalar(sum(instance:node_num_cpu:sum{job=\"node-exporter\"}))\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -92,7 +92,7 @@ data: "timeShift": null, "title": "CPU Utilisation", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -178,7 +178,7 @@ data: "timeShift": null, "title": "CPU Saturation (load1 per CPU)", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -276,7 +276,7 @@ data: "timeShift": null, "title": "Memory Utilisation", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -347,7 +347,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "instance:node_vmstat_pgmajfault:rate1m{job=\"node-exporter\"}", + "expr": "instance:node_vmstat_pgmajfault:rate5m{job=\"node-exporter\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -362,7 +362,7 @@ data: "timeShift": null, "title": "Memory Saturation (Major Page Faults)", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -453,7 +453,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "instance:node_network_receive_bytes_excluding_lo:rate1m{job=\"node-exporter\"}", + "expr": "instance:node_network_receive_bytes_excluding_lo:rate5m{job=\"node-exporter\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} Receive", @@ -461,7 +461,7 @@ data: "step": 10 }, { - "expr": "instance:node_network_transmit_bytes_excluding_lo:rate1m{job=\"node-exporter\"}", + "expr": "instance:node_network_transmit_bytes_excluding_lo:rate5m{job=\"node-exporter\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} Transmit", @@ -476,7 +476,7 @@ data: "timeShift": null, "title": "Net Utilisation (Bytes Receive/Transmit)", "tooltip": { - 
"shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -555,7 +555,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "instance:node_network_receive_drop_excluding_lo:rate1m{job=\"node-exporter\"}", + "expr": "instance:node_network_receive_drop_excluding_lo:rate5m{job=\"node-exporter\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} Receive", @@ -563,7 +563,7 @@ data: "step": 10 }, { - "expr": "instance:node_network_transmit_drop_excluding_lo:rate1m{job=\"node-exporter\"}", + "expr": "instance:node_network_transmit_drop_excluding_lo:rate5m{job=\"node-exporter\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} Transmit", @@ -578,7 +578,7 @@ data: "timeShift": null, "title": "Net Saturation (Drops Receive/Transmit)", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -661,7 +661,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "instance_device:node_disk_io_time_seconds:rate1m{job=\"node-exporter\"}\n/ scalar(count(instance_device:node_disk_io_time_seconds:rate1m{job=\"node-exporter\"}))\n", + "expr": "instance_device:node_disk_io_time_seconds:rate5m{job=\"node-exporter\"}\n/ scalar(count(instance_device:node_disk_io_time_seconds:rate5m{job=\"node-exporter\"}))\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}device{{`}}`}}", @@ -676,7 +676,7 @@ data: "timeShift": null, "title": "Disk IO Utilisation", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -747,7 +747,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "instance_device:node_disk_io_time_weighted_seconds:rate1m{job=\"node-exporter\"}\n/ scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate1m{job=\"node-exporter\"}))\n", + "expr": "instance_device:node_disk_io_time_weighted_seconds:rate5m{job=\"node-exporter\"}\n/ 
scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate5m{job=\"node-exporter\"}))\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}device{{`}}`}}", @@ -762,7 +762,7 @@ data: "timeShift": null, "title": "Disk IO Saturation", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -860,7 +860,7 @@ data: "timeShift": null, "title": "Disk Space Utilisation", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/node-rsrc-use.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/node-rsrc-use.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/node-rsrc-use.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/node-rsrc-use.yaml index 9defce09112..ddf651652de 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/node-rsrc-use.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/node-rsrc-use.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.nodeExporter.enabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled .Values.nodeExporter.enabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -77,7 +77,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "instance:node_cpu_utilisation:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "expr": "instance:node_cpu_utilisation:rate5m{job=\"node-exporter\", instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "Utilisation", @@ -92,7 +92,7 @@ data: "timeShift": null, "title": "CPU Utilisation", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -178,7 +178,7 @@ data: "timeShift": null, "title": "CPU Saturation (Load1 per CPU)", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -276,7 +276,7 @@ data: "timeShift": null, "title": "Memory Utilisation", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -347,7 +347,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "instance:node_vmstat_pgmajfault:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "expr": "instance:node_vmstat_pgmajfault:rate5m{job=\"node-exporter\", instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "Major page faults", @@ -362,7 +362,7 @@ data: "timeShift": null, "title": 
"Memory Saturation (Major Page Faults)", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -453,7 +453,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "instance:node_network_receive_bytes_excluding_lo:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "expr": "instance:node_network_receive_bytes_excluding_lo:rate5m{job=\"node-exporter\", instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "Receive", @@ -461,7 +461,7 @@ data: "step": 10 }, { - "expr": "instance:node_network_transmit_bytes_excluding_lo:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "expr": "instance:node_network_transmit_bytes_excluding_lo:rate5m{job=\"node-exporter\", instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "Transmit", @@ -476,7 +476,7 @@ data: "timeShift": null, "title": "Net Utilisation (Bytes Receive/Transmit)", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -555,7 +555,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "instance:node_network_receive_drop_excluding_lo:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "expr": "instance:node_network_receive_drop_excluding_lo:rate5m{job=\"node-exporter\", instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "Receive drops", @@ -563,7 +563,7 @@ data: "step": 10 }, { - "expr": "instance:node_network_transmit_drop_excluding_lo:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "expr": "instance:node_network_transmit_drop_excluding_lo:rate5m{job=\"node-exporter\", instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "Transmit drops", @@ -578,7 +578,7 @@ data: "timeShift": null, "title": "Net Saturation (Drops Receive/Transmit)", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -661,7 +661,7 @@ data: "steppedLine": 
false, "targets": [ { - "expr": "instance_device:node_disk_io_time_seconds:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "expr": "instance_device:node_disk_io_time_seconds:rate5m{job=\"node-exporter\", instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}device{{`}}`}}", @@ -676,7 +676,7 @@ data: "timeShift": null, "title": "Disk IO Utilisation", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -747,7 +747,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "instance_device:node_disk_io_time_weighted_seconds:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "expr": "instance_device:node_disk_io_time_weighted_seconds:rate5m{job=\"node-exporter\", instance=\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}device{{`}}`}}", @@ -762,7 +762,7 @@ data: "timeShift": null, "title": "Disk IO Saturation", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -860,7 +860,7 @@ data: "timeShift": null, "title": "Disk Space Utilisation", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/nodes.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/nodes.yaml similarity index 97% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/nodes.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/nodes.yaml index 8c67344c4de..571b5dae7f1 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/nodes.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/nodes.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -92,9 +92,8 @@ data: "steppedLine": false, "targets": [ { - "expr": "(\n (1 - rate(node_cpu_seconds_total{job=\"node-exporter\", mode=\"idle\", instance=\"$instance\"}[$__interval]))\n/ ignoring(cpu) group_left\n count without (cpu)( node_cpu_seconds_total{job=\"node-exporter\", mode=\"idle\", instance=\"$instance\"})\n)\n", + "expr": "(\n (1 - rate(node_cpu_seconds_total{job=\"node-exporter\", mode=\"idle\", instance=\"$instance\"}[$__rate_interval]))\n/ ignoring(cpu) group_left\n count without (cpu)( node_cpu_seconds_total{job=\"node-exporter\", mode=\"idle\", instance=\"$instance\"})\n)\n", "format": "time_series", - "interval": "1m", "intervalFactor": 5, "legendFormat": "{{`{{`}}cpu{{`}}`}}", "refId": "A" @@ -528,25 +527,22 @@ data: "steppedLine": false, "targets": [ { - "expr": "rate(node_disk_read_bytes_total{job=\"node-exporter\", instance=\"$instance\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[$__interval])", + "expr": "rate(node_disk_read_bytes_total{job=\"node-exporter\", instance=\"$instance\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[$__rate_interval])", "format": "time_series", - "interval": "1m", "intervalFactor": 2, "legendFormat": "{{`{{`}}device{{`}}`}} read", "refId": "A" }, { - "expr": 
"rate(node_disk_written_bytes_total{job=\"node-exporter\", instance=\"$instance\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[$__interval])", + "expr": "rate(node_disk_written_bytes_total{job=\"node-exporter\", instance=\"$instance\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[$__rate_interval])", "format": "time_series", - "interval": "1m", "intervalFactor": 2, "legendFormat": "{{`{{`}}device{{`}}`}} written", "refId": "B" }, { - "expr": "rate(node_disk_io_time_seconds_total{job=\"node-exporter\", instance=\"$instance\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[$__interval])", + "expr": "rate(node_disk_io_time_seconds_total{job=\"node-exporter\", instance=\"$instance\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[$__rate_interval])", "format": "time_series", - "interval": "1m", "intervalFactor": 2, "legendFormat": "{{`{{`}}device{{`}}`}} io time", "refId": "C" @@ -758,9 +754,8 @@ data: "steppedLine": false, "targets": [ { - "expr": "rate(node_network_receive_bytes_total{job=\"node-exporter\", instance=\"$instance\", device!=\"lo\"}[$__interval])", + "expr": "rate(node_network_receive_bytes_total{job=\"node-exporter\", instance=\"$instance\", device!=\"lo\"}[$__rate_interval])", "format": "time_series", - "interval": "1m", "intervalFactor": 2, "legendFormat": "{{`{{`}}device{{`}}`}}", "refId": "A" @@ -852,9 +847,8 @@ data: "steppedLine": false, "targets": [ { - "expr": "rate(node_network_transmit_bytes_total{job=\"node-exporter\", instance=\"$instance\", device!=\"lo\"}[$__interval])", + "expr": "rate(node_network_transmit_bytes_total{job=\"node-exporter\", instance=\"$instance\", device!=\"lo\"}[$__rate_interval])", "format": "time_series", - "interval": "1m", "intervalFactor": 2, "legendFormat": "{{`{{`}}device{{`}}`}}", "refId": "A" diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml similarity index 87% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml index 180087aa31f..191e5a8608c 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml @@ -4,7 +4,7 @@ Do not change in-place! In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -92,14 +92,14 @@ data: "steppedLine": false, "targets": [ { - "expr": "(\n sum without(instance, node) (kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n -\n sum without(instance, node) (kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n)\n", + "expr": "(\n sum without(instance, node) (topk(1, 
(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})))\n -\n sum without(instance, node) (topk(1, (kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})))\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Used Space", "refId": "A" }, { - "expr": "sum without(instance, node) (kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n", + "expr": "sum without(instance, node) (topk(1, (kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})))\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Free Space", @@ -207,7 +207,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "max without(instance,node) (\n(\n kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n -\n kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n)\n/\nkubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n* 100)\n", + "expr": "max without(instance,node) (\n(\n topk(1, kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n -\n topk(1, kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n)\n/\ntopk(1, kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n* 100)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "", @@ -289,14 +289,14 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum without(instance, node) (kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n", + "expr": "sum without(instance, node) (topk(1, (kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})))\n", "format": "time_series", "intervalFactor": 1, "legendFormat": "Used inodes", "refId": "A" }, { - "expr": "(\n sum without(instance, node) (kubelet_volume_stats_inodes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n -\n sum without(instance, node) (kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n)\n", + "expr": "(\n sum without(instance, node) (topk(1, (kubelet_volume_stats_inodes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})))\n -\n sum without(instance, node) (topk(1, (kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})))\n)\n", "format": "time_series", "intervalFactor": 1, "legendFormat": " Free inodes", @@ -404,7 +404,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "max without(instance,node) (\nkubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n/\nkubelet_volume_stats_inodes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n* 100)\n", + "expr": "max without(instance,node) (\ntopk(1, kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n/\ntopk(1, kubelet_volume_stats_inodes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n* 100)\n", "format": "time_series", "intervalFactor": 2, "legendFormat": "", @@ -500,7 +500,7 @@ data: "options": [ ], - "query": "label_values(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\"}, namespace)", + "query": "label_values(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics\"}, namespace)", "refresh": 2, "regex": "", "sort": 1, @@ -526,7 +526,7 @@ data: "options": [ ], - "query": "label_values(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\"}, persistentvolumeclaim)", + "query": "label_values(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics\", namespace=\"$namespace\"}, persistentvolumeclaim)", "refresh": 2, "regex": "", "sort": 1, diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/pod-total.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/pod-total.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/pod-total.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/pod-total.yaml index 1790df788d5..bf992338fbf 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/pod-total.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/pod-total.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -1027,14 +1027,14 @@ data: "datasource": "$datasource", "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, "includeAll": false, - "label": "cluster", + "label": null, "multi": false, "name": "cluster", "options": [ ], "query": "label_values(kube_pod_info, cluster)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 0, "tagValuesQuery": "", @@ -1065,7 +1065,7 @@ data: ], "query": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", - "refresh": 1, + "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 1, @@ -1097,7 +1097,7 @@ data: ], "query": "label_values(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}, pod)", - "refresh": 1, + "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 1, @@ -1225,4 +1225,4 @@ data: "uid": "7a18067ce943a40ae25454675c19ff5c", "version": 0 } -{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml similarity index 99% rename from 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml index 89c6c4be1a2..da334368371 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml @@ -4,7 +4,7 @@ Do not change in-place! In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.prometheus.prometheusSpec.remoteWriteDashboards }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled .Values.prometheus.prometheusSpec.remoteWriteDashboards }} apiVersion: v1 kind: ConfigMap metadata: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/prometheus.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/prometheus.yaml similarity index 98% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/prometheus.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/prometheus.yaml index f3292faf259..e2cdd50b74e 100644 --- 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/prometheus.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/prometheus.yaml @@ -4,7 +4,7 @@ Do not change in-place! In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -220,7 +220,7 @@ data: "timeShift": null, "title": "Prometheus Stats", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -319,7 +319,7 @@ data: "timeShift": null, "title": "Target Sync", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -405,7 +405,7 @@ data: "timeShift": null, "title": "Targets", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -503,7 +503,7 @@ data: "timeShift": null, "title": "Average Scrape Interval Duration", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -613,7 +613,7 @@ data: "timeShift": null, "title": "Scrape failures", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -699,7 +699,7 @@ data: "timeShift": null, "title": "Appended Samples", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -797,7 +797,7 @@ data: "timeShift": null, 
"title": "Head Series", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -883,7 +883,7 @@ data: "timeShift": null, "title": "Head Chunks", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -981,7 +981,7 @@ data: "timeShift": null, "title": "Query Rate", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -1067,7 +1067,7 @@ data: "timeShift": null, "title": "Stage Duration", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -1133,7 +1133,7 @@ data: "type": "datasource" }, { - "allValue": null, + "allValue": ".+", "current": { "selected": true, "text": "All", @@ -1161,7 +1161,7 @@ data: "useTags": false }, { - "allValue": null, + "allValue": ".+", "current": { "selected": true, "text": "All", diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/proxy.yaml similarity index 92% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/proxy.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/proxy.yaml index a7cecd5dd5a..074103aa647 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/proxy.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/proxy.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} {{- if (include "exporter.kubeProxy.enabled" .)}} apiVersion: v1 kind: ConfigMap @@ -108,7 +108,11 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(up{job=\"{{ include "exporter.kubeProxy.jobName" . }}\"})", + {{- if .Values.k3sServer.enabled }} + "expr": "sum(up{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\", metrics_path=\"/metrics\"})", + {{- else }} + "expr": "sum(up{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\"})", + {{- end }} "format": "time_series", "intervalFactor": 2, "legendFormat": "", @@ -177,7 +181,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(kubeproxy_sync_proxy_rules_duration_seconds_count{job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\"}[5m]))", + "expr": "sum(rate(kubeproxy_sync_proxy_rules_duration_seconds_count{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "rate", @@ -270,7 +274,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99,rate(kubeproxy_sync_proxy_rules_duration_seconds_bucket{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\", instance=~\"$instance\"}[5m]))", + "expr": "histogram_quantile(0.99,rate(kubeproxy_sync_proxy_rules_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -376,7 +380,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(kubeproxy_network_programming_duration_seconds_count{job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\"}[5m]))", + "expr": "sum(rate(kubeproxy_network_programming_duration_seconds_count{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "rate", @@ -469,7 +473,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(kubeproxy_network_programming_duration_seconds_bucket{job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(kubeproxy_network_programming_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -575,28 +579,28 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\",code=~\"2..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\",code=~\"2..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "2xx", "refId": "A" }, { - "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\", instance=~\"$instance\",code=~\"3..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\",code=~\"3..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "3xx", "refId": "B" }, { - "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\",code=~\"4..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\",code=~\"4..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "4xx", "refId": "C" }, { - "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\",code=~\"5..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\",code=~\"5..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "5xx", @@ -689,7 +693,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeProxy.jobName" . }}\",instance=~\"$instance\",verb=\"POST\"}[5m])) by (verb, url, le))", + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\",instance=~\"$instance\",verb=\"POST\"}[5m])) by (verb, url, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", @@ -795,7 +799,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\", instance=~\"$instance\", verb=\"GET\"}[5m])) by (verb, url, le))", + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\", verb=\"GET\"}[5m])) by (verb, url, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", @@ -901,7 +905,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "process_resident_memory_bytes{job=\"{{ include "exporter.kubeProxy.jobName" . }}\",instance=~\"$instance\"}", + "expr": "process_resident_memory_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\",instance=~\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -994,7 +998,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "rate(process_cpu_seconds_total{job=\"{{ include "exporter.kubeProxy.jobName" . }}\",instance=~\"$instance\"}[5m])", + "expr": "rate(process_cpu_seconds_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\",instance=~\"$instance\"}[5m])", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -1087,7 +1091,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "go_goroutines{job=\"{{ include "exporter.kubeProxy.jobName" . }}\",instance=~\"$instance\"}", + "expr": "go_goroutines{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\",instance=~\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -1171,6 +1175,32 @@ data: "allValue": null, "current": { + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + }, "datasource": "$datasource", "hide": 0, @@ -1181,7 +1211,7 @@ data: "options": [ ], - "query": "label_values(kubeproxy_network_programming_duration_seconds_bucket{job=\"{{ include "exporter.kubeProxy.jobName" . }}\"}, instance)", + "query": "label_values(kubeproxy_network_programming_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeProxy.jobName" . }}\"}, instance)", "refresh": 2, "regex": "", "sort": 1, diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/scheduler.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/scheduler.yaml similarity index 90% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/scheduler.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/scheduler.yaml index eba5d160cf6..4fe74325315 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/scheduler.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/scheduler.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} {{- if (include "exporter.kubeScheduler.enabled" .)}} apiVersion: v1 kind: ConfigMap @@ -108,7 +108,11 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(up{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\"})", + {{- if .Values.k3sServer.enabled }} + "expr": "sum(up{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", metrics_path=\"/metrics\"})", + {{- else }} + "expr": "sum(up{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\"})", + {{- end }} "format": "time_series", "intervalFactor": 2, "legendFormat": "", @@ -177,28 +181,28 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(scheduler_e2e_scheduling_duration_seconds_count{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance)", + "expr": "sum(rate(scheduler_e2e_scheduling_duration_seconds_count{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} e2e", "refId": "A" }, { - "expr": "sum(rate(scheduler_binding_duration_seconds_count{job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\", instance=~\"$instance\"}[5m])) by (instance)", + "expr": "sum(rate(scheduler_binding_duration_seconds_count{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} binding", "refId": "B" }, { - "expr": "sum(rate(scheduler_scheduling_algorithm_duration_seconds_count{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance)", + "expr": "sum(rate(scheduler_scheduling_algorithm_duration_seconds_count{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} scheduling algorithm", "refId": "C" }, { - "expr": "sum(rate(scheduler_volume_scheduling_duration_seconds_count{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance)", + "expr": "sum(rate(scheduler_volume_scheduling_duration_seconds_count{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance)", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} volume", @@ -291,28 +295,28 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}[5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\",instance=~\"$instance\"}[5m])) by (instance, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} e2e", "refId": "A" }, { - "expr": "histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}[5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}[5m])) by (instance, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} binding", "refId": "B" }, { - "expr": "histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}[5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}[5m])) by (instance, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} scheduling algorithm", "refId": "C" }, { - "expr": "histogram_quantile(0.99, sum(rate(scheduler_volume_scheduling_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}[5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(scheduler_volume_scheduling_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}[5m])) by (instance, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}} volume", @@ -418,28 +422,28 @@ data: "steppedLine": false, "targets": [ { - "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\", instance=~\"$instance\",code=~\"2..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\",code=~\"2..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "2xx", "refId": "A" }, { - "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\",code=~\"3..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\",code=~\"3..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "3xx", "refId": "B" }, { - "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\",code=~\"4..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\",code=~\"4..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "4xx", "refId": "C" }, { - "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\",code=~\"5..\"}[5m]))", + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\",code=~\"5..\"}[5m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "5xx", @@ -532,7 +536,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\", verb=\"POST\"}[5m])) by (verb, url, le))", + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\", instance=~\"$instance\", verb=\"POST\"}[5m])) by (verb, url, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", @@ -638,7 +642,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\", verb=\"GET\"}[5m])) by (verb, url, le))", + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\", verb=\"GET\"}[5m])) by (verb, url, le))", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", @@ -744,7 +748,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "process_resident_memory_bytes{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}", + "expr": "process_resident_memory_bytes{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -837,7 +841,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "rate(process_cpu_seconds_total{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])", + "expr": "rate(process_cpu_seconds_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -930,7 +934,7 @@ data: "steppedLine": false, "targets": [ { - "expr": "go_goroutines{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}", + "expr": "go_goroutines{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\",instance=~\"$instance\"}", "format": "time_series", "intervalFactor": 2, "legendFormat": "{{`{{`}}instance{{`}}`}}", @@ -1014,6 +1018,32 @@ data: "allValue": null, "current": { + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + }, "datasource": "$datasource", "hide": 0, @@ -1024,7 +1054,7 @@ data: "options": [ ], - "query": "label_values(process_cpu_seconds_total{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\"}, instance)", + "query": "label_values(process_cpu_seconds_total{cluster=\"$cluster\", job=\"{{ include "exporter.kubeScheduler.jobName" . }}\"}, instance)", "refresh": 2, "regex": "", "sort": 1, diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/statefulset.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/statefulset.yaml similarity index 97% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/statefulset.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/statefulset.yaml index 3512fada2a2..edc722d21e4 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/statefulset.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/statefulset.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -106,7 +106,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(rate(container_cpu_usage_seconds_total{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", container!=\"\", namespace=\"$namespace\", pod=~\"$statefulset.*\"}[3m]))", + "expr": "sum(rate(container_cpu_usage_seconds_total{job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", container!=\"\", namespace=\"$namespace\", pod=~\"$statefulset.*\"}[3m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "", @@ -189,7 +189,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(container_memory_usage_bytes{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", container!=\"\", namespace=\"$namespace\", pod=~\"$statefulset.*\"}) / 1024^3", + "expr": "sum(container_memory_usage_bytes{job=\"{{ include "exporter.kubelet.jobName" . 
}}\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", container!=\"\", namespace=\"$namespace\", pod=~\"$statefulset.*\"}) / 1024^3", "format": "time_series", "intervalFactor": 2, "legendFormat": "", @@ -272,7 +272,7 @@ data: "tableColumn": "", "targets": [ { - "expr": "sum(rate(container_network_transmit_bytes_total{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$statefulset.*\"}[3m])) + sum(rate(container_network_receive_bytes_total{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", namespace=\"$namespace\",pod=~\"$statefulset.*\"}[3m]))", + "expr": "sum(rate(container_network_transmit_bytes_total{job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$statefulset.*\"}[3m])) + sum(rate(container_network_receive_bytes_total{job=\"{{ include "exporter.kubelet.jobName" . }}\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", namespace=\"$namespace\",pod=~\"$statefulset.*\"}[3m]))", "format": "time_series", "intervalFactor": 2, "legendFormat": "", diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/workload-total.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/workload-total.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/workload-total.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/workload-total.yaml index cd4e2364dc4..5d57a2b4675 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/dashboards-1.14/workload-total.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/dashboards-1.14/workload-total.yaml @@ -4,7 +4,7 @@ Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if and (or .Values.grafana.enabled .Values.grafana.forceDeployDashboards) (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.defaultDashboardsEnabled }} apiVersion: v1 kind: ConfigMap metadata: @@ -1205,14 +1205,14 @@ data: "datasource": "$datasource", "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, "includeAll": false, - "label": "cluster", + "label": null, "multi": false, "name": "cluster", "options": [ ], "query": "label_values(kube_pod_info, cluster)", - "refresh": 1, + "refresh": 2, "regex": "", "sort": 0, "tagValuesQuery": "", @@ -1243,7 +1243,7 @@ data: ], "query": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", - "refresh": 1, + "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 1, @@ -1275,7 +1275,7 @@ data: ], "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\"}, workload)", - "refresh": 1, + "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 1, @@ -1307,7 +1307,7 @@ data: ], "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\"}, workload_type)", - "refresh": 1, + "refresh": 2, "regex": "", "skipUrlSync": false, "sort": 0, @@ -1435,4 +1435,4 @@ data: "uid": "728bf77cc1166d2f3133bf25846876cc", "version": 0 } -{{- end }} +{{- end }} \ No newline at end of file diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/namespaces.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/namespaces.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/namespaces.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/namespaces.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/servicemonitor.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/grafana/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/grafana/servicemonitor.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml index e86610cb7f6..5f886c617e0 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml @@ -57,9 +57,9 @@ spec: tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} {{- with .Values.prometheusOperator.admissionWebhooks.patch.tolerations }} {{ toYaml . 
| indent 8 }} -{{- end }} + {{- end }} +{{- if .Values.prometheusOperator.admissionWebhooks.patch.securityContext }} securityContext: - runAsGroup: 2000 - runAsNonRoot: true - runAsUser: 2000 +{{ toYaml .Values.prometheusOperator.admissionWebhooks.patch.securityContext | indent 8 }} +{{- end }} {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml index c2742073f76..93f5cdb666b 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -58,9 +58,9 @@ spec: tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} {{- with .Values.prometheusOperator.admissionWebhooks.patch.tolerations }} {{ toYaml . 
| indent 8 }} -{{- end }} + {{- end }} +{{- if .Values.prometheusOperator.admissionWebhooks.patch.securityContext }} securityContext: - runAsGroup: 2000 - runAsNonRoot: true - runAsUser: 2000 +{{ toYaml .Values.prometheusOperator.admissionWebhooks.patch.securityContext | indent 8 }} +{{- end }} {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml similarity index 93% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml index 2048f049c77..a91889b903c 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml @@ -10,6 +10,8 @@ metadata: labels: app: {{ template "kube-prometheus-stack.name" $ }}-admission {{- include "kube-prometheus-stack.labels" $ | indent 4 }} +{{- if .Values.global.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | indent 2 }} {{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml similarity index 89% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml index 
b67df54bf00..f42e33e0d09 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml @@ -5,8 +5,8 @@ metadata: name: {{ template "kube-prometheus-stack.fullname" . }}-admission {{- if .Values.prometheusOperator.admissionWebhooks.certManager.enabled }} annotations: - certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "kube-prometheus-stack.fullname" .) | quote }} - cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "kube-prometheus-stack.fullname" .) | quote }} + certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-admission" .Release.Namespace (include "kube-prometheus-stack.fullname" .) | quote }} + cert-manager.io/inject-ca-from: {{ printf "%s/%s-admission" .Release.Namespace (include "kube-prometheus-stack.fullname" .) 
| quote }} {{- end }} labels: app: {{ template "kube-prometheus-stack.name" $ }}-admission diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml similarity index 89% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml index 249488e4178..1439ed54e6a 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml @@ -5,8 +5,8 @@ metadata: name: {{ template "kube-prometheus-stack.fullname" . }}-admission {{- if .Values.prometheusOperator.admissionWebhooks.certManager.enabled }} annotations: - certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "kube-prometheus-stack.fullname" .) | quote }} - cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "kube-prometheus-stack.fullname" .) | quote }} + certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-admission" .Release.Namespace (include "kube-prometheus-stack.fullname" .) | quote }} + cert-manager.io/inject-ca-from: {{ printf "%s/%s-admission" .Release.Namespace (include "kube-prometheus-stack.fullname" .) 
| quote }} {{- end }} labels: app: {{ template "kube-prometheus-stack.name" $ }}-admission diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/certmanager.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/certmanager.yaml similarity index 97% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/certmanager.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/certmanager.yaml index 090e6a5bbcf..cfd516556d9 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/certmanager.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/certmanager.yaml @@ -18,7 +18,7 @@ metadata: namespace: {{ template "kube-prometheus-stack.namespace" . }} spec: secretName: {{ template "kube-prometheus-stack.fullname" . }}-root-cert - duration: 43800h # 5y + duration: 43800h0m0s # 5y issuerRef: name: {{ template "kube-prometheus-stack.fullname" . }}-self-signed-issuer commonName: "ca.webhook.kube-prometheus-stack" @@ -43,7 +43,7 @@ metadata: namespace: {{ template "kube-prometheus-stack.namespace" . }} spec: secretName: {{ template "kube-prometheus-stack.fullname" . 
}}-admission - duration: 8760h # 1y + duration: 8760h0m0s # 1y issuerRef: {{- if .Values.prometheusOperator.admissionWebhooks.certManager.issuerRef }} {{- toYaml .Values.prometheusOperator.admissionWebhooks.certManager.issuerRef | nindent 4 }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/clusterrole.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/clusterrole.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/clusterrole.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/clusterrolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/clusterrolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/clusterrolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/deployment.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/deployment.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/deployment.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/deployment.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/psp-clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/psp-clusterrole.yaml similarity index 100% rename from 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/psp-clusterrole.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/psp-clusterrole.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/psp-clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/psp-clusterrolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/psp-clusterrolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/psp-clusterrolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/psp.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/psp.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/psp.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/service.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/service.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/serviceaccount.yaml similarity index 71% rename from 
charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/serviceaccount.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/serviceaccount.yaml index ab41797e3cc..650f53c997c 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/serviceaccount.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/serviceaccount.yaml @@ -6,7 +6,11 @@ metadata: namespace: {{ template "kube-prometheus-stack.namespace" . }} labels: app: {{ template "kube-prometheus-stack.name" . }}-operator + app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-prometheus-operator + app.kubernetes.io/component: prometheus-operator {{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.global.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | indent 2 }} {{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/servicemonitor.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus-operator/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus-operator/servicemonitor.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/_rules.tpl b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/_rules.tpl similarity index 98% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/_rules.tpl rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/_rules.tpl index 83245c089fa..0e33d65e434 100644 --- 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/_rules.tpl +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/_rules.tpl @@ -35,4 +35,4 @@ rules: - "prometheus" - "kubernetes-apps" - "etcd" -{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/additionalAlertRelabelConfigs.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/additionalAlertRelabelConfigs.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/additionalAlertRelabelConfigs.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/additionalAlertRelabelConfigs.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/additionalAlertmanagerConfigs.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/additionalAlertmanagerConfigs.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/additionalAlertmanagerConfigs.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/additionalAlertmanagerConfigs.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/additionalPrometheusRules.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/additionalPrometheusRules.yaml similarity index 89% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/additionalPrometheusRules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/additionalPrometheusRules.yaml index 794e9ad274d..cb4aabaa7b5 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/additionalPrometheusRules.yaml +++ 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/additionalPrometheusRules.yaml @@ -1,6 +1,9 @@ {{- if or .Values.additionalPrometheusRules .Values.additionalPrometheusRulesMap}} apiVersion: v1 kind: List +metadata: + name: {{ include "kube-prometheus-stack.fullname" $ }}-additional-prometheus-rules + namespace: {{ template "kube-prometheus-stack.namespace" . }} items: {{- if .Values.additionalPrometheusRulesMap }} {{- range $prometheusRuleName, $prometheusRule := .Values.additionalPrometheusRulesMap }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/additionalScrapeConfigs.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/additionalScrapeConfigs.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/additionalScrapeConfigs.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/additionalScrapeConfigs.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/clusterrole.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/clusterrole.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/clusterrole.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/clusterrolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/clusterrolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/clusterrolebinding.yaml diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/ingress.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/ingress.yaml similarity index 56% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/ingress.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/ingress.yaml index 4d45873a780..3992789ba0d 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/ingress.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/ingress.yaml @@ -1,14 +1,12 @@ -{{- if and .Values.prometheus.enabled .Values.prometheus.ingress.enabled }} -{{- $pathType := .Values.prometheus.ingress.pathType | default "" }} -{{- $serviceName := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus" }} -{{- $servicePort := .Values.prometheus.service.port -}} -{{- $routePrefix := list .Values.prometheus.prometheusSpec.routePrefix }} -{{- $paths := .Values.prometheus.ingress.paths | default $routePrefix -}} -{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} -apiVersion: networking.k8s.io/v1beta1 -{{ else }} -apiVersion: extensions/v1beta1 -{{ end -}} +{{- if and .Values.prometheus.enabled .Values.prometheus.ingress.enabled -}} + {{- $pathType := .Values.prometheus.ingress.pathType | default "" -}} + {{- $serviceName := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus" -}} + {{- $servicePort := .Values.prometheus.service.port -}} + {{- $routePrefix := list .Values.prometheus.prometheusSpec.routePrefix -}} + {{- $paths := .Values.prometheus.ingress.paths | default $routePrefix -}} + {{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}} + {{- $ingressSupportsPathType := eq (include "kube-prometheus-stack.ingress.supportsPathType" .) 
"true" -}} +apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" . }} kind: Ingress metadata: {{- if .Values.prometheus.ingress.annotations }} @@ -24,7 +22,7 @@ metadata: {{ toYaml .Values.prometheus.ingress.labels | indent 4 }} {{- end }} spec: - {{- if or (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }} + {{- if $apiIsStable }} {{- if .Values.prometheus.ingress.ingressClassName }} ingressClassName: {{ .Values.prometheus.ingress.ingressClassName }} {{- end }} @@ -37,25 +35,39 @@ spec: paths: {{- range $p := $paths }} - path: {{ tpl $p $ }} - {{- if $pathType }} + {{- if and $pathType $ingressSupportsPathType }} pathType: {{ $pathType }} {{- end }} backend: + {{- if $apiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} serviceName: {{ $serviceName }} servicePort: {{ $servicePort }} - {{- end -}} + {{- end }} + {{- end -}} {{- end -}} {{- else }} - http: paths: {{- range $p := $paths }} - path: {{ tpl $p $ }} - {{- if $pathType }} + {{- if and $pathType $ingressSupportsPathType }} pathType: {{ $pathType }} {{- end }} backend: + {{- if $apiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} serviceName: {{ $serviceName }} servicePort: {{ $servicePort }} + {{- end }} {{- end -}} {{- end -}} {{- if .Values.prometheus.ingress.tls }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/ingressThanosSidecar.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/ingressThanosSidecar.yaml similarity index 70% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/ingressThanosSidecar.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/ingressThanosSidecar.yaml index 69de0f6635f..ace4058678d 100644 --- 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/ingressThanosSidecar.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/ingressThanosSidecar.yaml @@ -4,11 +4,9 @@ {{- $thanosPort := .Values.prometheus.thanosIngress.servicePort -}} {{- $routePrefix := list .Values.prometheus.prometheusSpec.routePrefix }} {{- $paths := .Values.prometheus.thanosIngress.paths | default $routePrefix -}} -{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} -apiVersion: networking.k8s.io/v1beta1 -{{ else }} -apiVersion: extensions/v1beta1 -{{ end -}} +{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "kube-prometheus-stack.ingress.supportsPathType" .) "true" -}} +apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" . }} kind: Ingress metadata: {{- if .Values.prometheus.thanosIngress.annotations }} @@ -23,7 +21,7 @@ metadata: {{ toYaml .Values.prometheus.thanosIngress.labels | indent 4 }} {{- end }} spec: - {{- if or (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }} + {{- if $apiIsStable }} {{- if .Values.prometheus.thanosIngress.ingressClassName }} ingressClassName: {{ .Values.prometheus.thanosIngress.ingressClassName }} {{- end }} @@ -36,25 +34,39 @@ spec: paths: {{- range $p := $paths }} - path: {{ tpl $p $ }} - {{- if $pathType }} + {{- if and $pathType $ingressSupportsPathType }} pathType: {{ $pathType }} {{- end }} backend: + {{- if $apiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $thanosPort }} + {{- else }} serviceName: {{ $serviceName }} servicePort: {{ $thanosPort }} - {{- end -}} + {{- end }} + {{- end -}} {{- end -}} {{- else }} - http: paths: {{- range $p := $paths }} - path: {{ tpl $p $ }} - {{- if $pathType }} + {{- if and $pathType $ingressSupportsPathType }} pathType: {{ $pathType }} {{- end }} 
backend: + {{- if $apiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $thanosPort }} + {{- else }} serviceName: {{ $serviceName }} servicePort: {{ $thanosPort }} + {{- end }} {{- end -}} {{- end -}} {{- if .Values.prometheus.thanosIngress.tls }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/ingressperreplica.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/ingressperreplica.yaml similarity index 75% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/ingressperreplica.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/ingressperreplica.yaml index 33143775bcd..df631993bae 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/ingressperreplica.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/ingressperreplica.yaml @@ -3,6 +3,8 @@ {{- $count := .Values.prometheus.prometheusSpec.replicas | int -}} {{- $servicePort := .Values.prometheus.servicePerReplica.port -}} {{- $ingressValues := .Values.prometheus.ingressPerReplica -}} +{{- $apiIsStable := eq (include "kube-prometheus-stack.ingress.isStable" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "kube-prometheus-stack.ingress.supportsPathType" .) 
"true" -}} apiVersion: v1 kind: List metadata: @@ -11,17 +13,13 @@ metadata: items: {{ range $i, $e := until $count }} - kind: Ingress - {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} - apiVersion: networking.k8s.io/v1beta1 - {{ else }} - apiVersion: extensions/v1beta1 - {{ end -}} + apiVersion: {{ include "kube-prometheus-stack.ingress.apiVersion" $ }} metadata: name: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }} namespace: {{ template "kube-prometheus-stack.namespace" $ }} labels: app: {{ include "kube-prometheus-stack.name" $ }}-prometheus -{{ include "kube-prometheus-stack.labels" $ | indent 8 }} + {{ include "kube-prometheus-stack.labels" $ | indent 8 }} {{- if $ingressValues.labels }} {{ toYaml $ingressValues.labels | indent 8 }} {{- end }} @@ -30,7 +28,7 @@ items: {{ toYaml $ingressValues.annotations | indent 8 }} {{- end }} spec: - {{- if or ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") ($.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }} + {{- if $apiIsStable }} {{- if $ingressValues.ingressClassName }} ingressClassName: {{ $ingressValues.ingressClassName }} {{- end }} @@ -41,12 +39,19 @@ items: paths: {{- range $p := $ingressValues.paths }} - path: {{ tpl $p $ }} - {{- if $pathType }} + {{- if and $pathType $ingressSupportsPathType }} pathType: {{ $pathType }} {{- end }} backend: + {{- if $apiIsStable }} + service: + name: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }} + port: + number: {{ $servicePort }} + {{- else }} serviceName: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }} servicePort: {{ $servicePort }} + {{- end }} {{- end -}} {{- if or $ingressValues.tlsSecretName $ingressValues.tlsSecretPerReplica.enabled }} tls: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/nginx-config.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/nginx-config.yaml similarity index 100% 
rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/nginx-config.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/nginx-config.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/podDisruptionBudget.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/podDisruptionBudget.yaml similarity index 95% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/podDisruptionBudget.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/podDisruptionBudget.yaml index 573317a32f2..cce4a855c04 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/podDisruptionBudget.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/podDisruptionBudget.yaml @@ -16,6 +16,6 @@ spec: {{- end }} selector: matchLabels: - app: prometheus + app.kubernetes.io/name: prometheus prometheus: {{ template "kube-prometheus-stack.fullname" . 
}}-prometheus {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/podmonitors.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/podmonitors.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/podmonitors.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/podmonitors.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/prometheus.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/prometheus.yaml similarity index 93% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/prometheus.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/prometheus.yaml index 9c30c814ca6..cf2056cb987 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/prometheus.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/prometheus.yaml @@ -60,9 +60,6 @@ spec: externalUrl: "{{ .Values.global.cattle.url }}/k8s/clusters/{{ .Values.global.cattle.clusterId }}/api/v1/namespaces/{{ .Values.namespaceOverride }}/services/http:{{ template "kube-prometheus-stack.fullname" . }}-prometheus:{{ .Values.prometheus.service.port }}/proxy" {{- else }} externalUrl: http://{{ template "kube-prometheus-stack.fullname" . }}-prometheus.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.prometheus.service.port }} -{{- end }} -{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }} - ignoreNamespaceSelectors: {{ .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }} {{- end }} nodeSelector: {{ include "linux-node-selector" . 
| nindent 4 }} {{- if .Values.prometheus.prometheusSpec.nodeSelector }} @@ -75,6 +72,12 @@ spec: logFormat: {{ .Values.prometheus.prometheusSpec.logFormat }} listenLocal: {{ .Values.prometheus.prometheusSpec.listenLocal }} enableAdminAPI: {{ .Values.prometheus.prometheusSpec.enableAdminAPI }} +{{- if .Values.prometheus.prometheusSpec.enableFeatures }} + enableFeatures: +{{- range $enableFeatures := .Values.prometheus.prometheusSpec.enableFeatures }} + - {{ tpl $enableFeatures $ }} +{{- end }} +{{- end }} {{- if .Values.prometheus.prometheusSpec.scrapeInterval }} scrapeInterval: {{ .Values.prometheus.prometheusSpec.scrapeInterval }} {{- end }} @@ -155,13 +158,23 @@ spec: {{ else }} probeNamespaceSelector: {} {{- end }} -{{- if .Values.prometheus.prometheusSpec.remoteRead }} +{{- if (or .Values.prometheus.prometheusSpec.remoteRead .Values.prometheus.prometheusSpec.additionalRemoteRead) }} remoteRead: -{{ toYaml .Values.prometheus.prometheusSpec.remoteRead | indent 4 }} +{{- if .Values.prometheus.prometheusSpec.remoteRead }} +{{ tpl (toYaml .Values.prometheus.prometheusSpec.remoteRead | indent 4) . }} {{- end }} -{{- if .Values.prometheus.prometheusSpec.remoteWrite }} +{{- if .Values.prometheus.prometheusSpec.additionalRemoteRead }} +{{ toYaml .Values.prometheus.prometheusSpec.additionalRemoteRead | indent 4 }} +{{- end }} +{{- end }} +{{- if (or .Values.prometheus.prometheusSpec.remoteWrite .Values.prometheus.prometheusSpec.additionalRemoteWrite) }} remoteWrite: -{{ toYaml .Values.prometheus.prometheusSpec.remoteWrite | indent 4 }} +{{- if .Values.prometheus.prometheusSpec.remoteWrite }} +{{ tpl (toYaml .Values.prometheus.prometheusSpec.remoteWrite | indent 4) . 
}} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.additionalRemoteWrite }} +{{ toYaml .Values.prometheus.prometheusSpec.additionalRemoteWrite | indent 4 }} +{{- end }} {{- end }} {{- if .Values.prometheus.prometheusSpec.securityContext }} securityContext: @@ -207,7 +220,7 @@ spec: - topologyKey: {{ .Values.prometheus.prometheusSpec.podAntiAffinityTopologyKey }} labelSelector: matchExpressions: - - {key: app, operator: In, values: [prometheus]} + - {key: app.kubernetes.io/name, operator: In, values: [prometheus]} - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-prometheus]} {{- else if eq .Values.prometheus.prometheusSpec.podAntiAffinity "soft" }} podAntiAffinity: @@ -217,7 +230,7 @@ spec: topologyKey: {{ .Values.prometheus.prometheusSpec.podAntiAffinityTopologyKey }} labelSelector: matchExpressions: - - {key: app, operator: In, values: [prometheus]} + - {key: app.kubernetes.io/name, operator: In, values: [prometheus]} - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . 
}}-prometheus]} {{- end }} {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/psp-clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/psp-clusterrole.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/psp-clusterrole.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/psp-clusterrole.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/psp-clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/psp-clusterrolebinding.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/psp-clusterrolebinding.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/psp-clusterrolebinding.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/psp.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/psp.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/psp.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/alertmanager.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/alertmanager.rules.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/alertmanager.rules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/alertmanager.rules.yaml diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/etcd.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/etcd.yaml similarity index 98% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/etcd.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/etcd.yaml index 85287315c18..c3702bd318f 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/etcd.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/etcd.yaml @@ -1,5 +1,5 @@ {{- /* -Generated from 'etcd' group from https://raw.githubusercontent.com/etcd-io/website/master/content/docs/v3.4.0/op-guide/etcd3_alert.rules.yml +Generated from 'etcd' group from https://raw.githubusercontent.com/etcd-io/website/master/content/en/docs/v3.4/op-guide/etcd3_alert.rules.yml Do not change in-place! 
In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} @@ -178,4 +178,4 @@ spec: {{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} {{- end }} {{- end }} -{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/general.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/general.rules.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/general.rules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/general.rules.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/k8s.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/k8s.rules.yaml similarity index 72% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/k8s.rules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/k8s.rules.yaml index 19511e8fb16..1d69d9f5f07 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/k8s.rules.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/k8s.rules.yaml @@ -26,57 +26,57 @@ spec: rules: - expr: |- sum by (cluster, namespace, pod, container) ( - rate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!="", container!="POD"}[5m]) + rate(container_cpu_usage_seconds_total{job="{{ include "exporter.kubelet.jobName" . 
}}", metrics_path="/metrics/cadvisor", image!=""}[5m]) ) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) ( 1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=""}) ) record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate - expr: |- - container_memory_working_set_bytes{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + container_memory_working_set_bytes{job="{{ include "exporter.kubelet.jobName" . }}", metrics_path="/metrics/cadvisor", image!=""} * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info{node!=""}) ) record: node_namespace_pod_container:container_memory_working_set_bytes - expr: |- - container_memory_rss{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + container_memory_rss{job="{{ include "exporter.kubelet.jobName" . }}", metrics_path="/metrics/cadvisor", image!=""} * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info{node!=""}) ) record: node_namespace_pod_container:container_memory_rss - expr: |- - container_memory_cache{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + container_memory_cache{job="{{ include "exporter.kubelet.jobName" . }}", metrics_path="/metrics/cadvisor", image!=""} * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info{node!=""}) ) record: node_namespace_pod_container:container_memory_cache - expr: |- - container_memory_swap{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + container_memory_swap{job="{{ include "exporter.kubelet.jobName" . 
}}", metrics_path="/metrics/cadvisor", image!=""} * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, max by(namespace, pod, node) (kube_pod_info{node!=""}) ) record: node_namespace_pod_container:container_memory_swap - expr: |- - sum by (namespace) ( - sum by (namespace, pod) ( - max by (namespace, pod, container) ( - kube_pod_container_resource_requests_memory_bytes{job="kube-state-metrics"} - ) * on(namespace, pod) group_left() max by (namespace, pod) ( - kube_pod_status_phase{phase=~"Pending|Running"} == 1 + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_requests{resource="memory",job="kube-state-metrics"} + ) * on(namespace, pod, cluster) group_left() max by (namespace, pod) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 ) ) ) - record: namespace:kube_pod_container_resource_requests_memory_bytes:sum + record: namespace_memory:kube_pod_container_resource_requests:sum - expr: |- - sum by (namespace) ( - sum by (namespace, pod) ( - max by (namespace, pod, container) ( - kube_pod_container_resource_requests_cpu_cores{job="kube-state-metrics"} - ) * on(namespace, pod) group_left() max by (namespace, pod) ( + sum by (namespace, cluster) ( + sum by (namespace, pod, cluster) ( + max by (namespace, pod, container, cluster) ( + kube_pod_container_resource_requests{resource="cpu",job="kube-state-metrics"} + ) * on(namespace, pod, cluster) group_left() max by (namespace, pod) ( kube_pod_status_phase{phase=~"Pending|Running"} == 1 ) ) ) - record: namespace:kube_pod_container_resource_requests_cpu_cores:sum + record: namespace_cpu:kube_pod_container_resource_requests:sum - expr: |- max by (cluster, namespace, workload, pod) ( label_replace( diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-state-metrics.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-state-metrics.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kube-state-metrics.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kube-state-metrics.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubelet.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubelet.rules.yaml similarity index 82% rename 
from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubelet.rules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubelet.rules.yaml index 8712b9ff5ea..216132ec8fb 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubelet.rules.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubelet.rules.yaml @@ -4,7 +4,8 @@ Do not change in-place! In order to change this file first read following link: https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack */ -}} {{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} -{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.kubelet.enabled .Values.defaultRules.rules.kubelet }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubelet }} +{{- if (include "exporter.kubelet.enabled" .) }} apiVersion: monitoring.coreos.com/v1 kind: PrometheusRule metadata: @@ -24,16 +25,17 @@ spec: groups: - name: kubelet.rules rules: - - expr: histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + - expr: histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="{{ include "exporter.kubelet.jobName" . 
}}", metrics_path="/metrics"}) labels: quantile: '0.99' record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile - - expr: histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + - expr: histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="{{ include "exporter.kubelet.jobName" . }}", metrics_path="/metrics"}) labels: quantile: '0.9' record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile - - expr: histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + - expr: histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="{{ include "exporter.kubelet.jobName" . 
}}", metrics_path="/metrics"}) labels: quantile: '0.5' record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile +{{- end }} {{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-apps.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-apps.yaml similarity index 98% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-apps.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-apps.yaml index 198bbb845b0..77bb40a1ee7 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-apps.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-apps.yaml @@ -82,7 +82,7 @@ spec: != kube_deployment_status_replicas_available{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} ) and ( - changes(kube_deployment_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[5m]) + changes(kube_deployment_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[10m]) == 0 ) @@ -103,7 +103,7 @@ spec: != kube_statefulset_status_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} ) and ( - changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[5m]) + changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[10m]) == 0 ) @@ -273,7 +273,7 @@ spec: < kube_hpa_spec_max_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}) and - changes(kube_hpa_status_current_replicas[15m]) == 0 + changes(kube_hpa_status_current_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace 
}}"}[15m]) == 0 for: 15m labels: severity: warning diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-resources.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-resources.yaml similarity index 91% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-resources.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-resources.yaml index 898f8eed23f..27babbd378a 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-resources.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-resources.yaml @@ -30,11 +30,11 @@ spec: runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecpuovercommit summary: Cluster has overcommitted CPU resource requests. expr: |- - sum(namespace:kube_pod_container_resource_requests_cpu_cores:sum{}) + sum(namespace_cpu:kube_pod_container_resource_requests:sum{}) / - sum(kube_node_status_allocatable_cpu_cores) + sum(kube_node_status_allocatable{resource="cpu"}) > - (count(kube_node_status_allocatable_cpu_cores)-1) / count(kube_node_status_allocatable_cpu_cores) + ((count(kube_node_status_allocatable{resource="cpu"}) > 1) - 1) / count(kube_node_status_allocatable{resource="cpu"}) for: 5m labels: severity: warning @@ -47,13 +47,13 @@ spec: runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubememoryovercommit summary: Cluster has overcommitted memory resource requests. 
expr: |- - sum(namespace:kube_pod_container_resource_requests_memory_bytes:sum{}) + sum(namespace_memory:kube_pod_container_resource_requests:sum{}) / - sum(kube_node_status_allocatable_memory_bytes) + sum(kube_node_status_allocatable{resource="memory"}) > - (count(kube_node_status_allocatable_memory_bytes)-1) + ((count(kube_node_status_allocatable{resource="memory"}) > 1) - 1) / - count(kube_node_status_allocatable_memory_bytes) + count(kube_node_status_allocatable{resource="memory"}) for: 5m labels: severity: warning @@ -68,7 +68,7 @@ spec: expr: |- sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="cpu"}) / - sum(kube_node_status_allocatable_cpu_cores) + sum(kube_node_status_allocatable{resource="cpu"}) > 1.5 for: 5m labels: @@ -84,7 +84,7 @@ spec: expr: |- sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="memory"}) / - sum(kube_node_status_allocatable_memory_bytes{job="kube-state-metrics"}) + sum(kube_node_status_allocatable{resource="memory",job="kube-state-metrics"}) > 1.5 for: 5m labels: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-storage.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-storage.yaml similarity index 82% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-storage.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-storage.yaml index 527e6e30887..ff71f8ddc51 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-storage.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-storage.yaml @@ -31,9 +31,9 @@ spec: runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepersistentvolumefillingup summary: PersistentVolume is filling up. 
expr: |- - kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} + kubelet_volume_stats_available_bytes{job="{{ include "exporter.kubelet.jobName" . }}", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} / - kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} + kubelet_volume_stats_capacity_bytes{job="{{ include "exporter.kubelet.jobName" . }}", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} < 0.03 for: 1m labels: @@ -48,12 +48,12 @@ spec: summary: PersistentVolume is filling up. expr: |- ( - kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} + kubelet_volume_stats_available_bytes{job="{{ include "exporter.kubelet.jobName" . }}", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} / - kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} + kubelet_volume_stats_capacity_bytes{job="{{ include "exporter.kubelet.jobName" . }}", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} ) < 0.15 and - predict_linear(kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0 + predict_linear(kubelet_volume_stats_available_bytes{job="{{ include "exporter.kubelet.jobName" . 
}}", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0 for: 1h labels: severity: warning diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml similarity index 99% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml index 2ed298b35d5..c3110cfb357 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml @@ -51,7 +51,7 @@ spec: description: An aggregated API {{`{{`}} $labels.name {{`}}`}}/{{`{{`}} $labels.namespace {{`}}`}} has reported errors. It has appeared unavailable {{`{{`}} $value | humanize {{`}}`}} times averaged over the past 10m. runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-aggregatedapierrors summary: An aggregated API has reported errors. 
- expr: sum by(name, namespace)(increase(aggregator_unavailable_apiservice_count[10m])) > 4 + expr: sum by(name, namespace)(increase(aggregator_unavailable_apiservice_total[10m])) > 4 labels: severity: warning {{- if .Values.defaultRules.additionalRuleLabels }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml similarity index 95% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml index 4d536ec2d65..dbec4e95802 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml @@ -59,7 +59,7 @@ spec: ) / max by(node) ( - kube_node_status_capacity_pods{job="kube-state-metrics"} != 1 + kube_node_status_capacity{job="kube-state-metrics",resource="pods"} != 1 ) > 0.95 for: 15m labels: @@ -96,7 +96,7 @@ spec: description: Kubelet Pod startup 99th percentile latency is {{`{{`}} $value {{`}}`}} 
seconds on node {{`{{`}} $labels.node {{`}}`}}. runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletpodstartuplatencyhigh summary: Kubelet Pod startup latency is too high. - expr: histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (instance, le)) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"} > 60 + expr: histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{job="{{ include "exporter.kubelet.jobName" . }}", metrics_path="/metrics"}[5m])) by (instance, le)) * on(instance) group_left(node) kubelet_node_name{job="{{ include "exporter.kubelet.jobName" . }}", metrics_path="/metrics"} > 60 for: 15m labels: severity: warning @@ -171,13 +171,13 @@ spec: {{- if .Values.defaultRules.additionalRuleLabels }} {{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} {{- end }} -{{- if .Values.prometheusOperator.kubeletService.enabled }} +{{- if (include "exporter.kubeletService.enabled" .) }} - alert: KubeletDown annotations: description: Kubelet has disappeared from Prometheus target discovery. runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletdown summary: Target disappeared from Prometheus target discovery. - expr: absent(up{job="kubelet", metrics_path="/metrics"} == 1) + expr: absent(up{job="{{ include "exporter.kubelet.jobName" . 
}}", metrics_path="/metrics"} == 1) for: 15m labels: severity: critical diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system.yaml similarity index 94% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system.yaml index 52230c62e03..ea2f2589ced 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/kubernetes-system.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/kubernetes-system.yaml @@ -29,7 +29,7 @@ spec: description: There are {{`{{`}} $value {{`}}`}} different semantic versions of Kubernetes components running. runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeversionmismatch summary: Different semantic versions of Kubernetes components running. 
- expr: count(count by (gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*).*"))) > 1 + expr: count(count by (git_version) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"git_version","$1","git_version","(v[0-9]*.[0-9]*).*"))) > 1 for: 15m labels: severity: warning diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/node-exporter.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/node-exporter.rules.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/node-exporter.rules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/node-exporter.rules.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/node-exporter.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/node-exporter.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/node-exporter.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/node-exporter.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/node-network.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/node-network.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/node-network.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/node-network.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/node.rules.yaml 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/node.rules.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/node.rules.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/node.rules.yaml index c841e6f6ea0..f24c5550ba3 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/node.rules.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/node.rules.yaml @@ -34,7 +34,7 @@ spec: count by (cluster, node) (sum by (node, cpu) ( node_cpu_seconds_total{job="node-exporter"} * on (namespace, pod) group_left(node) - node_namespace_pod:kube_pod_info: + topk by(namespace, pod) (1, node_namespace_pod:kube_pod_info:) )) record: node:node_num_cpu:sum - expr: |- diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/prometheus-operator.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/prometheus-operator.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/prometheus-operator.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/prometheus-operator.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/prometheus.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/prometheus.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/rules-1.14/prometheus.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/rules-1.14/prometheus.yaml diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/service.yaml similarity index 98% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/service.yaml index 8676b81ead6..c6420060ae0 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/service.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/service.yaml @@ -51,7 +51,7 @@ spec: {{ toYaml .Values.prometheus.service.additionalPorts | indent 2 }} {{- end }} selector: - app: prometheus + app.kubernetes.io/name: prometheus prometheus: {{ template "kube-prometheus-stack.fullname" . }}-prometheus {{- if .Values.prometheus.service.sessionAffinity }} sessionAffinity: {{ .Values.prometheus.service.sessionAffinity }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/serviceThanosSidecar.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceThanosSidecar.yaml similarity index 96% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/serviceThanosSidecar.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceThanosSidecar.yaml index 7c33379cb4b..c3d52ef8067 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/serviceThanosSidecar.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceThanosSidecar.yaml @@ -25,6 +25,6 @@ spec: nodePort: {{ .Values.prometheus.thanosService.nodePort }} {{- end }} selector: - app: prometheus + app.kubernetes.io/name: prometheus prometheus: {{ template "kube-prometheus-stack.fullname" . 
}}-prometheus {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceThanosSidecarExternal.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceThanosSidecarExternal.yaml new file mode 100644 index 00000000000..99668f425d5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceThanosSidecarExternal.yaml @@ -0,0 +1,28 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.thanosServiceExternal.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-thanos-external + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.prometheus.thanosServiceExternal.labels }} +{{ toYaml .Values.prometheus.thanosServiceExternal.labels | indent 4 }} +{{- end }} +{{- if .Values.prometheus.thanosServiceExternal.annotations }} + annotations: +{{ toYaml .Values.prometheus.thanosServiceExternal.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.prometheus.thanosServiceExternal.type }} + ports: + - name: {{ .Values.prometheus.thanosServiceExternal.portName }} + port: {{ .Values.prometheus.thanosServiceExternal.port }} + targetPort: {{ .Values.prometheus.thanosServiceExternal.targetPort }} + {{- if eq .Values.prometheus.thanosServiceExternal.type "NodePort" }} + nodePort: {{ .Values.prometheus.thanosServiceExternal.nodePort }} + {{- end }} + selector: + app.kubernetes.io/name: prometheus + prometheus: {{ template "kube-prometheus-stack.fullname" . 
}}-prometheus +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceaccount.yaml similarity index 77% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/serviceaccount.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceaccount.yaml index 862d5f8e44f..0b9929bc60a 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/serviceaccount.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceaccount.yaml @@ -6,11 +6,15 @@ metadata: namespace: {{ template "kube-prometheus-stack.namespace" . }} labels: app: {{ template "kube-prometheus-stack.name" . }}-prometheus + app.kubernetes.io/name: {{ template "kube-prometheus-stack.name" . }}-prometheus + app.kubernetes.io/component: prometheus {{ include "kube-prometheus-stack.labels" . 
| indent 4 }} {{- if .Values.prometheus.serviceAccount.annotations }} annotations: {{ toYaml .Values.prometheus.serviceAccount.annotations | indent 4 }} {{- end }} +{{- if .Values.global.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.global.imagePullSecrets | indent 2 }} {{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/servicemonitor.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/servicemonitor.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/servicemonitors.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/servicemonitors.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/servicemonitors.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/servicemonitors.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/serviceperreplica.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceperreplica.yaml similarity index 97% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/serviceperreplica.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceperreplica.yaml index 1a55433621f..470ce79f201 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/prometheus/serviceperreplica.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/prometheus/serviceperreplica.yaml @@ -38,7 +38,7 @@ items: port: {{ $serviceValues.port }} targetPort: 
{{ $serviceValues.targetPort }} selector: - app: prometheus + app.kubernetes.io/name: prometheus prometheus: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus statefulset.kubernetes.io/pod-name: prometheus-{{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }} type: "{{ $serviceValues.type }}" diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/clusterrole.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/clusterrole.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/clusterrole.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/config-role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/config-role.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/config-role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/config-role.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboard-role.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboard-role.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboard-role.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboard-role.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml similarity index 80% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml index 20c57dd2a14..7b51a0bf7ab 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml @@ -1,12 +1,12 @@ -# Source: {{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.ingressNginx.enabled }} apiVersion: v1 kind: ConfigMap metadata: namespace: {{ .Values.grafana.defaultDashboards.namespace }} name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "ingress-nginx" | trunc 63 | trimSuffix "-" }} - annotations: -{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + {{- if .Values.grafana.sidecar.dashboards.annotations }} + annotations: {{ toYaml .Values.grafana.sidecar.dashboards.annotations | nindent 4 }} + {{- end }} labels: {{- if $.Values.grafana.sidecar.dashboards.label }} {{ $.Values.grafana.sidecar.dashboards.label }}: "1" diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/cluster-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/cluster-dashboards.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/cluster-dashboards.yaml rename to 
charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/cluster-dashboards.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/default-dashboard.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/default-dashboard.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/default-dashboard.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/default-dashboard.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/k8s-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/k8s-dashboards.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/k8s-dashboards.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/k8s-dashboards.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/nodes-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/nodes-dashboards.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/nodes-dashboards.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/nodes-dashboards.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/pods-dashboards.yaml 
b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/pods-dashboards.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/pods-dashboards.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/pods-dashboards.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/workload-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/workload-dashboards.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/dashboards/rancher/workload-dashboards.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/dashboards/rancher/workload-dashboards.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml similarity index 71% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml index d256576ad34..53a9ad68978 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml @@ -1,4 +1,7 @@ -{{- if .Values.ingressNginx.enabled }} +{{- if and (not .Values.ingressNginx.enabled) (.Values.rkeIngressNginx.enabled) }} +{{- fail 
"Cannot set .Values.rkeIngressNginx.enabled=true when .Values.ingressNginx.enabled=false" }} +{{- end }} +{{- if and .Values.ingressNginx.enabled (not .Values.rkeIngressNginx.enabled) }} apiVersion: v1 kind: Service metadata: diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml similarity index 73% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml index 64377877274..0cbc07f697d 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml @@ -1,4 +1,7 @@ -{{- if .Values.ingressNginx.enabled }} +{{- if and (not .Values.ingressNginx.enabled) (.Values.rkeIngressNginx.enabled) }} +{{- fail "Cannot set .Values.rkeIngressNginx.enabled=true when .Values.ingressNginx.enabled=false" }} +{{- end }} +{{- if and .Values.ingressNginx.enabled (not .Values.rkeIngressNginx.enabled) }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: @@ -21,6 +24,9 @@ spec: {{- if .Values.ingressNginx.serviceMonitor.interval}} interval: {{ .Values.ingressNginx.serviceMonitor.interval }} {{- end }} + {{- if .Values.ingressNginx.serviceMonitor.proxyUrl }} + proxyUrl: {{ .Values.ingressNginx.serviceMonitor.proxyUrl}} + {{- end }} bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token {{- if .Values.ingressNginx.serviceMonitor.metricRelabelings }} metricRelabelings: diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/hardened.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/hardened.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/rancher-monitoring/hardened.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/rancher-monitoring/hardened.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/validate-install-crd.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/validate-install-crd.yaml similarity index 100% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/templates/validate-install-crd.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/templates/validate-install-crd.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/values.yaml b/charts/rancher-monitoring/rancher-monitoring/16.6.0/values.yaml similarity index 92% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/values.yaml rename to charts/rancher-monitoring/rancher-monitoring/16.6.0/values.yaml index 62aa60e8542..2782360246f 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/values.yaml +++ b/charts/rancher-monitoring/rancher-monitoring/16.6.0/values.yaml @@ -82,21 +82,66 @@ rkeEtcd: - effect: "NoSchedule" operator: "Exists" +rkeIngressNginx: + enabled: false + metricsPort: 10254 + component: ingress-nginx + clients: + port: 10015 + useLocalhost: true + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + nodeSelector: + node-role.kubernetes.io/worker: "true" + ## k3s PushProx Monitoring ## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox ## k3sServer: enabled: false - metricsPort: 10249 + metricsPort: 10250 component: k3s-server clients: port: 
10013 useLocalhost: true + https: + enabled: true + useServiceAccountCredentials: true + insecureSkipVerify: true + rbac: + additionalRules: + - nonResourceURLs: ["/metrics/cadvisor"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes/metrics"] + verbs: ["get"] tolerations: - effect: "NoExecute" operator: "Exists" - effect: "NoSchedule" operator: "Exists" + serviceMonitor: + endpoints: + - port: metrics + honorLabels: true + relabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + - port: metrics + path: /metrics/cadvisor + honorLabels: true + relabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + - port: metrics + path: /metrics/probes + honorLabels: true + relabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path ## KubeADM PushProx Monitoring ## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox @@ -228,6 +273,105 @@ rke2Etcd: - effect: "NoSchedule" operator: "Exists" +rke2IngressNginx: + enabled: false + metricsPort: 10254 + component: ingress-nginx + clients: + port: 10015 + useLocalhost: true + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: "app.kubernetes.io/component" + operator: "In" + values: + - "controller" + topologyKey: "kubernetes.io/hostname" + namespaces: + - "kube-system" + # in the RKE2 cluster, the ingress-nginx-controller is deployed as + # a Deployment with 1 pod when RKE2 version is <= 1.20, + # a DaemonSet when RKE2 version is >= 1.21 + deployment: + enabled: false + replicas: 1 + + + +## Additional PushProx Monitoring +## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox +## + +# hardenedKubelet can only be deployed if kubelet.enabled=true +# If enabled, it replaces the ServiceMonitor deployed by the default kubelet 
option with a +# PushProx-based exporter that does not require a host port to be open to scrape metrics. +hardenedKubelet: + enabled: false + metricsPort: 10250 + component: kubelet + clients: + port: 10015 + useLocalhost: true + https: + enabled: true + useServiceAccountCredentials: true + insecureSkipVerify: true + rbac: + additionalRules: + - nonResourceURLs: ["/metrics/cadvisor"] + verbs: ["get"] + - apiGroups: [""] + resources: ["nodes/metrics"] + verbs: ["get"] + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + serviceMonitor: + endpoints: + - port: metrics + honorLabels: true + relabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + - port: metrics + path: /metrics/cadvisor + honorLabels: true + relabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + - port: metrics + path: /metrics/probes + honorLabels: true + relabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + +# hardenedNodeExporter can only be deployed if nodeExporter.enabled=true +# If enabled, it replaces the ServiceMonitor deployed by the default nodeExporter with a +# PushProx-based exporter that does not require a host port to be open to scrape metrics. +hardenedNodeExporter: + enabled: false + metricsPort: 9796 + component: node-exporter + clients: + port: 10016 + useLocalhost: true + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + ## Component scraping nginx-ingress-controller ## ingressNginx: @@ -247,6 +391,10 @@ ingressNginx: ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## metric relabel configs to apply to samples before ingestion. 
## metricRelabelings: [] @@ -280,6 +428,10 @@ namespaceOverride: "cattle-monitoring-system" ## kubeTargetVersionOverride: "" +## Allow kubeVersion to be overridden while creating the ingress +## +kubeVersionOverride: "" + ## Provide a name to substitute for the full names of resources ## fullnameOverride: "" @@ -407,6 +559,10 @@ alertmanager: ## enabled: true + ## Annotations for Alertmanager + ## + annotations: {} + ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2 ## apiVersion: v2 @@ -433,22 +589,6 @@ alertmanager: ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file ## https://prometheus.io/webtools/alerting/routing-tree-editor/ ## - ## Example Slack Config - ## config: - ## route: - ## group_by: ['job'] - ## group_wait: 30s - ## group_interval: 5m - ## repeat_interval: 3h - ## receiver: 'slack-notifications' - ## receivers: - ## - name: 'slack-notifications' - ## slack_configs: - ## - send_resolved: true - ## text: '{{ template "slack.rancher.text" . }}' - ## api_url: - ## templates: - ## - /etc/alertmanager/config/*.tmpl config: global: resolve_timeout: 5m @@ -721,6 +861,10 @@ alertmanager: interval: "" selfMonitor: true + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. scheme: "" @@ -728,7 +872,7 @@ alertmanager: ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig tlsConfig: {} - bearerTokenFile: "" + bearerTokenFile: ## metric relabel configs to apply to samples before ingestion. ## @@ -751,7 +895,7 @@ alertmanager: ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec ## alertmanagerSpec: - ## Standard object’s metadata. 
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata + ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata ## Metadata Labels and Annotations gets propagated to the Alertmanager pods. ## podMetadata: {} @@ -759,8 +903,8 @@ alertmanager: ## Image of Alertmanager ## image: - repository: rancher/mirrored-prom-alertmanager - tag: v0.21.0 + repository: rancher/mirrored-prometheus-alertmanager + tag: v0.22.2 sha: "" ## If true then the user will be responsible to provide a secret with alertmanager configuration @@ -1005,6 +1149,14 @@ grafana: deploymentStrategy: type: Recreate + ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled + ## + forceDeployDatasources: false + + ## ForceDeployDashboard Create dashboard configmap even if grafana deployment has been disabled + ## + forceDeployDashboards: false + ## Deploy default dashboards. ## defaultDashboardsEnabled: true @@ -1070,6 +1222,10 @@ grafana: enabled: true defaultDatasourceEnabled: true + ## URL of prometheus datasource + ## + # url: http://prometheus-stack-prometheus:9090/ + # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default # defaultDatasourceScrapeInterval: 15s @@ -1127,7 +1283,7 @@ grafana: proxy: image: repository: rancher/mirrored-library-nginx - tag: 1.19.9-alpine + tag: 1.21.0-alpine ## Enable an Specify container in extraContainers. 
This is meant to allow adding an authentication proxy to a grafana pod extraContainers: | @@ -1208,23 +1364,14 @@ kubeApiServer: tlsConfig: serverName: kubernetes insecureSkipVerify: false - - ## If your API endpoint address is not reachable (as in AKS) you can replace it with the kubernetes service - ## - relabelings: [] - # - sourceLabels: - # - __meta_kubernetes_namespace - # - __meta_kubernetes_service_name - # - __meta_kubernetes_endpoint_port_name - # action: keep - # regex: default;kubernetes;https - # - targetLabel: __address__ - # replacement: kubernetes.default.svc:443 - serviceMonitor: ## Scrape interval. If not set, the Prometheus default scrape interval is used. ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + jobLabel: component selector: matchLabels: @@ -1237,6 +1384,15 @@ kubeApiServer: # - action: keep # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' # sourceLabels: [__name__] + relabelings: [] + # - sourceLabels: + # - __meta_kubernetes_namespace + # - __meta_kubernetes_service_name + # - __meta_kubernetes_endpoint_port_name + # action: keep + # regex: default;kubernetes;https + # - targetLabel: __address__ + # replacement: kubernetes.default.svc:443 ## Component scraping the kubelet and kubelet-hosted cAdvisor ## @@ -1249,6 +1405,10 @@ kubelet: ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## Enable scraping the kubelet over https. For requirements to enable this see ## https://github.com/prometheus-operator/prometheus-operator/issues/926 ## @@ -1381,6 +1541,10 @@ kubeControllerManager: ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## Enable scraping kube-controller-manager over https. 
## Requires proper certs (not self-signed) and delegated authentication/authorization checks ## @@ -1423,6 +1587,10 @@ coreDns: ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## metric relabel configs to apply to samples before ingestion. ## metricRelabelings: [] @@ -1458,6 +1626,10 @@ kubeDns: ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## metric relabel configs to apply to samples before ingestion. ## metricRelabelings: [] @@ -1526,6 +1698,9 @@ kubeEtcd: ## Scrape interval. If not set, the Prometheus default scrape interval is used. ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" scheme: http insecureSkipVerify: false serverName: "" @@ -1577,6 +1752,9 @@ kubeScheduler: ## Scrape interval. If not set, the Prometheus default scrape interval is used. ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" ## Enable scraping kube-scheduler over https. ## Requires proper certs (not self-signed) and delegated authentication/authorization checks ## @@ -1631,6 +1809,10 @@ kubeProxy: ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## Enable scraping kube-proxy over https. ## Requires proper certs (not self-signed) and delegated authentication/authorization checks ## @@ -1659,9 +1841,15 @@ kubeStateMetrics: ## Scrape interval. If not set, the Prometheus default scrape interval is used. ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" ## Override serviceMonitor selector ## selectorOverride: {} + ## Override namespace selector + ## + namespaceOverride: "" ## metric relabel configs to apply to samples before ingestion. ## @@ -1710,6 +1898,10 @@ nodeExporter: ## interval: "" + ## proxyUrl: URL of a proxy that should be used for scraping. 
+ ## + proxyUrl: "" + ## How long until a scrape request times out. If not set, the Prometheus default scape timeout is used. ## scrapeTimeout: "" @@ -1785,7 +1977,7 @@ prometheusOperator: enabled: true image: repository: rancher/mirrored-jettech-kube-webhook-certgen - tag: v1.5.0 + tag: v1.5.2 sha: "" pullPolicy: IfNotPresent resources: {} @@ -1796,6 +1988,16 @@ prometheusOperator: nodeSelector: {} affinity: {} tolerations: [] + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + # Use certmanager to generate webhook certs certManager: enabled: false @@ -1983,7 +2185,7 @@ prometheusOperator: ## image: repository: rancher/mirrored-prometheus-operator-prometheus-operator - tag: v0.46.0 + tag: v0.48.0 sha: "" pullPolicy: IfNotPresent @@ -1999,7 +2201,7 @@ prometheusOperator: ## prometheusConfigReloaderImage: repository: rancher/mirrored-prometheus-operator-prometheus-config-reloader - tag: v0.46.0 + tag: v0.48.0 sha: "" ## Set the prometheus config reloader side-car CPU limit @@ -2030,6 +2232,7 @@ prometheus: serviceAccount: create: true name: "" + annotations: {} # Service for thanos service discovery on sidecar # Enable this can make Thanos Query can use @@ -2053,9 +2256,19 @@ prometheus: ## nodePort: 30901 + # Service for external access to sidecar + # Enabling this creates a service to expose thanos-sidecar outside the cluster. 
+ thanosServiceExternal: + enabled: false + annotations: {} + labels: {} + portName: grpc + port: 10901 + targetPort: "grpc" + ## Service type ## - type: ClusterIP + type: LoadBalancer ## Port to expose on each node ## @@ -2322,11 +2535,16 @@ prometheus: ## enableAdminAPI: false + # EnableFeatures API enables access to Prometheus disabled features. + # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/ + enableFeatures: [] + # - exemplar-storage + ## Image of Prometheus. ## image: repository: rancher/mirrored-prometheus-prometheus - tag: v2.24.0 + tag: v2.27.1 sha: "" ## Tolerations for use with node taints @@ -2552,7 +2770,7 @@ prometheus: ## routePrefix: / - ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata + ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata ## Metadata Labels and Annotations gets propagated to the prometheus pods. ## podMetadata: {} @@ -2589,11 +2807,15 @@ prometheus: ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec remoteRead: [] # - url: http://remote1/read + ## additionalRemoteRead is appended to remoteRead + additionalRemoteRead: [] ## The remote_write spec configuration for Prometheus. ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec remoteWrite: [] # - url: http://remote1/push + ## additionalRemoteWrite is appended to remoteWrite + additionalRemoteWrite: [] ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature remoteWriteDashboards: false @@ -2745,7 +2967,7 @@ prometheus: proxy: image: repository: rancher/mirrored-library-nginx - tag: 1.19.9-alpine + tag: 1.21.0-alpine ## Containers allows injecting additional containers. 
This is meant to allow adding an authentication proxy to a Prometheus pod. ## if using proxy extraContainer update targetPort with proxy container port diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/.helmignore b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/.helmignore similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/.helmignore rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/.helmignore diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/Chart.yaml b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/Chart.yaml similarity index 95% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/Chart.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/Chart.yaml index 70c2b0103dd..c615b0fc669 100644 --- a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/Chart.yaml +++ b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/Chart.yaml @@ -20,4 +20,4 @@ maintainers: name: rancher-node-exporter sources: - https://github.com/prometheus/node_exporter/ -version: 1.16.201+up1.16.2 +version: 1.18.100+up1.18.1 diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/OWNERS b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/OWNERS similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/OWNERS rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/OWNERS diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/README.md b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/README.md similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/README.md rename to 
charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/README.md diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/ci/port-values.yaml b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/ci/port-values.yaml similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/ci/port-values.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/ci/port-values.yaml diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/NOTES.txt b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/NOTES.txt similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/NOTES.txt rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/NOTES.txt diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/_helpers.tpl b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/_helpers.tpl similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/_helpers.tpl rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/_helpers.tpl diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/daemonset.yaml b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/daemonset.yaml similarity index 97% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/daemonset.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/daemonset.yaml index a3a1bc8853a..bd64e6948be 100644 --- a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/daemonset.yaml +++ 
b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/daemonset.yaml @@ -29,6 +29,10 @@ spec: {{- if .Values.priorityClassName }} priorityClassName: {{ .Values.priorityClassName }} {{- end }} + {{- if .Values.extraInitContainers }} + initContainers: + {{ toYaml .Values.extraInitContainers | nindent 6 }} + {{- end }} containers: - name: node-exporter image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}" diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/endpoints.yaml b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/endpoints.yaml similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/endpoints.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/endpoints.yaml diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/monitor.yaml b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/monitor.yaml similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/monitor.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/monitor.yaml diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/psp-clusterrole.yaml b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/psp-clusterrole.yaml similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/psp-clusterrole.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/psp-clusterrole.yaml diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/psp-clusterrolebinding.yaml 
b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/psp-clusterrolebinding.yaml similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/psp-clusterrolebinding.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/psp-clusterrolebinding.yaml diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/psp.yaml b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/psp.yaml similarity index 92% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/psp.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/psp.yaml index f00506c9800..ec1259e01e7 100644 --- a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/psp.yaml +++ b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/psp.yaml @@ -6,6 +6,10 @@ metadata: name: {{ template "prometheus-node-exporter.fullname" . }} namespace: {{ template "prometheus-node-exporter.namespace" . }} labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} +{{- if .Values.rbac.pspAnnotations }} + annotations: +{{ toYaml .Values.rbac.pspAnnotations | indent 4 }} +{{- end}} spec: privileged: false # Required to prevent escalations to root. 
diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/service.yaml b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/service.yaml similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/service.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/service.yaml diff --git a/charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/serviceaccount.yaml b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/serviceaccount.yaml similarity index 100% rename from charts/rancher-node-exporter/rancher-node-exporter/1.16.201+up1.16.2/templates/serviceaccount.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/templates/serviceaccount.yaml diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/values.yaml b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/values.yaml similarity index 97% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/values.yaml rename to charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/values.yaml index 47dedd4d2df..5a0e6ca352a 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-node-exporter/values.yaml +++ b/charts/rancher-node-exporter/rancher-node-exporter/1.18.100+up1.18.1/values.yaml @@ -77,6 +77,7 @@ rbac: ## If true, create & use Pod Security Policy resources ## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ pspEnabled: true + pspAnnotations: {} # for deployments that have node_exporter deployed outside of the cluster, list # their addresses here @@ -175,3 +176,7 @@ sidecarVolumeMount: [] ## - name: collector-textfiles ## mountPath: /run/prometheus ## readOnly: false + +## Additional InitContainers to 
initialize the pod +## +extraInitContainers: [] diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/.helmignore b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/.helmignore similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/.helmignore rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/.helmignore diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/Chart.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/Chart.yaml similarity index 93% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/Chart.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/Chart.yaml index 33db948e960..2e3a2ce5dce 100644 --- a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/Chart.yaml +++ b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/Chart.yaml @@ -5,7 +5,7 @@ annotations: catalog.rancher.io/namespace: cattle-monitoring-system catalog.rancher.io/release-name: rancher-prometheus-adapter apiVersion: v1 -appVersion: v0.8.3 +appVersion: v0.8.4 description: A Helm chart for k8s prometheus adapter home: https://github.com/DirectXMan12/k8s-prometheus-adapter keywords: @@ -23,4 +23,4 @@ name: rancher-prometheus-adapter sources: - https://github.com/kubernetes/charts - https://github.com/DirectXMan12/k8s-prometheus-adapter -version: 2.12.101+up2.12.1 +version: 2.14.0 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/README.md b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/README.md similarity index 98% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/README.md rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/README.md 
index 1fe1fad661d..b6028b01a2c 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/README.md +++ b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/README.md @@ -118,7 +118,7 @@ Enabling this option will cause resource metrics to be served at `/apis/metrics. rules: resource: cpu: - containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[3m])) by (<<.GroupBy>>) + containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, container!=""}[3m])) by (<<.GroupBy>>) nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[3m])) by (<<.GroupBy>>) resources: overrides: @@ -130,7 +130,7 @@ rules: resource: pod containerLabel: container memory: - containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>) + containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>, container!=""}) by (<<.GroupBy>>) nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>) resources: overrides: diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/ci/default-values.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/ci/default-values.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/ci/default-values.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/ci/default-values.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/ci/external-rules-values.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/ci/external-rules-values.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/ci/external-rules-values.yaml rename to 
charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/ci/external-rules-values.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/NOTES.txt b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/NOTES.txt similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/NOTES.txt rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/NOTES.txt diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/_helpers.tpl b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/_helpers.tpl similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/_helpers.tpl rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/_helpers.tpl diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/certmanager.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/certmanager.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/certmanager.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/certmanager.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/cluster-role-binding-auth-delegator.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/cluster-role-binding-auth-delegator.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/cluster-role-binding-auth-delegator.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/cluster-role-binding-auth-delegator.yaml diff 
--git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/cluster-role-binding-resource-reader.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/cluster-role-binding-resource-reader.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/cluster-role-binding-resource-reader.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/cluster-role-binding-resource-reader.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/cluster-role-resource-reader.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/cluster-role-resource-reader.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/cluster-role-resource-reader.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/cluster-role-resource-reader.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/configmap.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/configmap.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/configmap.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/configmap.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/custom-metrics-apiservice.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/custom-metrics-apiservice.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/custom-metrics-apiservice.yaml rename to 
charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/custom-metrics-apiservice.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/custom-metrics-cluster-role-binding-hpa.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/custom-metrics-cluster-role-binding-hpa.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/custom-metrics-cluster-role-binding-hpa.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/custom-metrics-cluster-role-binding-hpa.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/custom-metrics-cluster-role.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/custom-metrics-cluster-role.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/custom-metrics-cluster-role.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/custom-metrics-cluster-role.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/deployment.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/deployment.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/deployment.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/deployment.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/external-metrics-apiservice.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/external-metrics-apiservice.yaml similarity index 100% rename from 
charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/external-metrics-apiservice.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/external-metrics-apiservice.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/external-metrics-cluster-role-binding-hpa.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/external-metrics-cluster-role-binding-hpa.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/external-metrics-cluster-role-binding-hpa.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/external-metrics-cluster-role-binding-hpa.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/external-metrics-cluster-role.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/external-metrics-cluster-role.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/external-metrics-cluster-role.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/external-metrics-cluster-role.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/pdb.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/pdb.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/pdb.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/pdb.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/psp.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/psp.yaml similarity 
index 95% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/psp.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/psp.yaml index a88c9c2f2a0..c5ae1060747 100644 --- a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/psp.yaml +++ b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/psp.yaml @@ -12,6 +12,9 @@ metadata: spec: {{- if .Values.hostNetwork.enabled }} hostNetwork: true + hostPorts: + - min: {{ .Values.listenPort }} + max: {{ .Values.listenPort }} {{- end }} fsGroup: rule: RunAsAny diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/resource-metrics-apiservice.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/resource-metrics-apiservice.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/resource-metrics-apiservice.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/resource-metrics-apiservice.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/resource-metrics-cluster-role-binding.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/resource-metrics-cluster-role-binding.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/resource-metrics-cluster-role-binding.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/resource-metrics-cluster-role-binding.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/resource-metrics-cluster-role.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/resource-metrics-cluster-role.yaml 
similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/resource-metrics-cluster-role.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/resource-metrics-cluster-role.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/role-binding-auth-reader.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/role-binding-auth-reader.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/role-binding-auth-reader.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/role-binding-auth-reader.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/secret.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/secret.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/secret.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/secret.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/service.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/service.yaml similarity index 100% rename from charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/service.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/service.yaml diff --git a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/serviceaccount.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/serviceaccount.yaml similarity index 75% rename from 
charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/serviceaccount.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/serviceaccount.yaml index 42ef0267ebc..c3050f0528c 100644 --- a/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101+up2.12.1/templates/serviceaccount.yaml +++ b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/templates/serviceaccount.yaml @@ -9,4 +9,8 @@ metadata: heritage: {{ .Release.Service }} name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} namespace: {{ .Release.Namespace }} +{{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} +{{- end }} {{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/values.yaml b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/values.yaml similarity index 93% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/values.yaml rename to charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/values.yaml index d9108cb9ae3..3da3cf5bfbd 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/prometheus-adapter/values.yaml +++ b/charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.14.0/values.yaml @@ -7,7 +7,7 @@ affinity: {} image: repository: rancher/mirrored-directxman12-k8s-prometheus-adapter - tag: v0.8.3 + tag: v0.8.4 pullPolicy: IfNotPresent logLevel: 4 @@ -43,6 +43,11 @@ serviceAccount: # The name of the service account to use. # If not set and create is true, a name is generated using the fullname template name: + # ServiceAccount annotations. 
+ # Use case: AWS EKS IAM roles for service accounts + # ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html + annotations: {} + # Custom DNS configuration to be added to prometheus-adapter pods dnsConfig: {} # nameservers: @@ -85,7 +90,7 @@ rules: # metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) resource: {} # cpu: -# containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[3m])) by (<<.GroupBy>>) +# containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, container!=""}[3m])) by (<<.GroupBy>>) # nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[3m])) by (<<.GroupBy>>) # resources: # overrides: @@ -97,7 +102,7 @@ rules: # resource: pod # containerLabel: container # memory: -# containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>) +# containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>, container!=""}) by (<<.GroupBy>>) # nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>) # resources: # overrides: diff --git a/charts/rancher-pushprox/rancher-pushprox/0.1.400/.helmignore b/charts/rancher-pushprox/rancher-pushprox/0.1.400/.helmignore new file mode 100644 index 00000000000..0e8a0eb36f4 --- /dev/null +++ b/charts/rancher-pushprox/rancher-pushprox/0.1.400/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-pushprox/rancher-pushprox/0.1.400/Chart.yaml b/charts/rancher-pushprox/rancher-pushprox/0.1.400/Chart.yaml new file mode 100644 index 00000000000..934e2584f45 --- /dev/null +++ b/charts/rancher-pushprox/rancher-pushprox/0.1.400/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. +name: rancher-pushprox +type: application +version: 0.1.400 diff --git a/charts/rancher-pushprox/rancher-pushprox/0.1.400/README.md b/charts/rancher-pushprox/rancher-pushprox/0.1.400/README.md new file mode 100644 index 00000000000..0530c56aa22 --- /dev/null +++ b/charts/rancher-pushprox/rancher-pushprox/0.1.400/README.md @@ -0,0 +1,60 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. 
scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. + +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | +| `namespaceOverride` | The namespace to install the chart | `""` + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. 
Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `serviceMonitor.endpoints` | A list of endpoints that will be added to the ServiceMonitor based on the [Endpoint spec](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint) | `[{port: metrics}]` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. 
Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` | +| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` | +| `clients.deployment.replicas` | The number of pods the Deployment has, it should match the number of pod the hostNetwork Deployment has. Required and only used if `client.deployment.enable` is set | `0` | +| `clients.deployment.affinity` | The affinity rules that allocate the pod to the node in which the hostNetwork Deployment's pods run. 
Required and only used if `client.deployment.enable` is set | `{}` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/_helpers.tpl b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/_helpers.tpl new file mode 100644 index 00000000000..458ad21cdd5 --- /dev/null +++ b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/_helpers.tpl @@ -0,0 +1,104 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) 
(int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.endpoints" -}} +{{- $proxyURL := (include "pushProxy.proxyUrl" .) -}} +{{- $useHTTPS := .Values.clients.https.enabled -}} +{{- $endpoints := .Values.serviceMonitor.endpoints }} +{{- range $endpoints }} +{{- $_ := set . "proxyUrl" $proxyURL }} +{{- if $useHTTPS -}} +{{- if (hasKey . "params") }} +{{- $_ := set (get . "params") "_scheme" (list "https") }} +{{- else }} +{{- $_ := set . 
"params" (dict "_scheme" (list "https")) }} +{{- end }} +{{- end }} +{{- end }} +{{- toYaml $endpoints }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-clients-rbac.yaml b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-clients-rbac.yaml new file mode 100644 index 00000000000..f1a8e7232bb --- /dev/null +++ b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- if .Values.clients.rbac.additionalRules }} +{{ toYaml .Values.clients.rbac.additionalRules }} +{{- end }} +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . 
}} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-clients.yaml b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-clients.yaml new file mode 100644 index 00000000000..3775d17b8fc --- /dev/null +++ b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-clients.yaml @@ -0,0 +1,145 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +{{- if .Values.clients.deployment.enabled }} +kind: Deployment +{{- else }} +kind: DaemonSet +{{- end }} +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + {{- if .Values.clients.deployment.enabled }} + replicas: {{ .Values.clients.deployment.replicas }} + {{- end }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + {{- if .Values.clients.affinity }} + affinity: {{ toYaml .Values.clients.affinity | nindent 8 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . 
| nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . 
}} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . }}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ 
required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-proxy-rbac.yaml b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-proxy-rbac.yaml new file mode 100644 index 00000000000..147eb437438 --- /dev/null +++ b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ include "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-proxy.yaml b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-proxy.yaml new file mode 100644 index 00000000000..571e1313851 --- /dev/null +++ b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . }} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-servicemonitor.yaml b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-servicemonitor.yaml new file mode 100644 index 00000000000..7f961d6f493 --- /dev/null +++ b/charts/rancher-pushprox/rancher-pushprox/0.1.400/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . 
}} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: {{include "pushProxy.serviceMonitor.endpoints" . | nindent 4 }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-pushprox/rancher-pushprox/0.1.400/values.yaml b/charts/rancher-pushprox/rancher-pushprox/0.1.400/values.yaml new file mode 100644 index 00000000000..6ad1eab4def --- /dev/null +++ b/charts/rancher-pushprox/rancher-pushprox/0.1.400/values.yaml @@ -0,0 +1,111 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. 
client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +namespaceOverride: "" + +# The component that is being monitored (i.e. etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + # A list of endpoints that will be added to the ServiceMonitor based on the Endpoint spec + # Source: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint + # By default, proxyUrl and params._scheme will be overridden based on other values + endpoints: + - port: metrics + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? 
+ enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + rbac: + # Additional permissions to provide to the ServiceAccount bound to the client + # This can be used to provide additional permissions for the client to scrape metrics from the k8s API + # Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true + additionalRules: [] + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + affinity: {} + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher2-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + + # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes. + # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in + # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod. + # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment, + # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet. + # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will + # be responsible for upgrading this chart accordingly to the right number of replicas. 
+ deployment: + enabled: false + replicas: 0 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher2-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/_helpers.tpl b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/_helpers.tpl deleted file mode 100644 index 4fc68cf9754..00000000000 --- a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/_helpers.tpl +++ /dev/null @@ -1,73 +0,0 @@ -# Rancher - -{{- define "system_default_registry" -}} -{{- if .Values.global.cattle.systemDefaultRegistry -}} -{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} -{{- end -}} -{{- end -}} - -# General - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -The components in this chart create additional resources that expand the longest created name strings. -The longest name that gets created adds and extra 37 characters, so truncation should be 63-35=26. -*/}} -{{- define "windowsExporter.name" -}} -{{ printf "%s-windows-exporter" .Release.Name }} -{{- end -}} - -{{- define "windowsExporter.namespace" -}} -{{- default .Release.Namespace .Values.namespaceOverride -}} -{{- end -}} - -{{- define "windowsExporter.labels" -}} -k8s-app: {{ template "windowsExporter.name" . 
}} -release: {{ .Release.Name }} -component: "windows-exporter" -provider: kubernetes -{{- end -}} - -# Client - -{{- define "windowsExporter.client.nodeSelector" -}} -{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} -beta.kubernetes.io/os: windows -{{- else -}} -kubernetes.io/os: windows -{{- end -}} -{{- if .Values.clients.nodeSelector }} -{{ toYaml .Values.clients.nodeSelector }} -{{- end }} -{{- end -}} - -{{- define "windowsExporter.client.tolerations" -}} -{{- if .Values.clients.tolerations -}} -{{ toYaml .Values.clients.tolerations }} -{{- else -}} -- operator: Exists -{{- end -}} -{{- end -}} - -{{- define "windowsExporter.client.env" -}} -- name: LISTEN_PORT - value: {{ required "Need .Values.clients.port to figure out where to get metrics from" .Values.clients.port | quote }} -{{- if .Values.clients.enabledCollectors }} -- name: ENABLED_COLLECTORS - value: {{ .Values.clients.enabledCollectors | quote }} -{{- end }} -{{- if .Values.clients.env }} -{{ toYaml .Values.clients.env }} -{{- end }} -{{- end -}} - -{{- define "windowsExporter.validatePathPrefix" -}} -{{- if .Values.global.cattle.rkeWindowsPathPrefix -}} -{{- $prefixPath := (.Values.global.cattle.rkeWindowsPathPrefix | replace "/" "\\") -}} -{{- if (not (hasSuffix "\\" $prefixPath)) -}} -{{- fail (printf ".Values.global.cattle.rkeWindowsPathPrefix must end in '/' or '\\', found %s" $prefixPath) -}} -{{- end -}} -{{- end -}} -{{- end -}} diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/.helmignore b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/.helmignore new file mode 100644 index 00000000000..0e8a0eb36f4 --- /dev/null +++ b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/Chart.yaml b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/Chart.yaml similarity index 92% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/Chart.yaml rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/Chart.yaml index 0be555fc15b..3a96d2289b5 100644 --- a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/Chart.yaml +++ b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/Chart.yaml @@ -5,11 +5,11 @@ annotations: catalog.rancher.io/namespace: cattle-monitoring-system catalog.rancher.io/release-name: rancher-windows-exporter apiVersion: v1 -appVersion: 0.0.4 +appVersion: 0.0.2 description: Sets up monitoring metrics from Windows nodes via Prometheus windows-exporter maintainers: - email: arvind.iyengar@rancher.com name: aiyengar2 name: rancher-windows-exporter type: application -version: 0.1.0 +version: 0.1.100 diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/README.md b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/README.md similarity index 100% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/README.md rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/README.md diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/scripts/check-wins-version.ps1 b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/scripts/check-wins-version.ps1 similarity index 100% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/scripts/check-wins-version.ps1 rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/scripts/check-wins-version.ps1 diff 
--git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/scripts/proxy-entry.ps1 b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/scripts/proxy-entry.ps1 similarity index 100% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/scripts/proxy-entry.ps1 rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/scripts/proxy-entry.ps1 diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/scripts/run.ps1 b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/scripts/run.ps1 similarity index 100% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/scripts/run.ps1 rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/scripts/run.ps1 diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/_helpers.tpl b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/_helpers.tpl new file mode 100644 index 00000000000..16975d9d05d --- /dev/null +++ b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/_helpers.tpl @@ -0,0 +1,113 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# General + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +The components in this chart create additional resources that expand the longest created name strings. +The longest name that gets created adds and extra 37 characters, so truncation should be 63-35=26. 
+*/}} +{{- define "windowsExporter.name" -}} +{{ printf "%s-windows-exporter" .Release.Name }} +{{- end -}} + +{{- define "windowsExporter.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride -}} +{{- end -}} + +{{- define "windowsExporter.labels" -}} +k8s-app: {{ template "windowsExporter.name" . }} +release: {{ .Release.Name }} +component: "windows-exporter" +provider: kubernetes +{{- end -}} + +# Client + +{{- define "windowsExporter.client.nodeSelector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: windows +{{- else -}} +kubernetes.io/os: windows +{{- end -}} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector }} +{{- end }} +{{- end -}} + +{{- define "windowsExporter.client.tolerations" -}} +{{- if .Values.clients.tolerations -}} +{{ toYaml .Values.clients.tolerations }} +{{- else -}} +- operator: Exists +{{- end -}} +{{- end -}} + +{{- define "windowsExporter.client.env" -}} +- name: LISTEN_PORT + value: {{ required "Need .Values.clients.port to figure out where to get metrics from" .Values.clients.port | quote }} +{{- if .Values.clients.enabledCollectors }} +- name: ENABLED_COLLECTORS + value: {{ .Values.clients.enabledCollectors | quote }} +{{- end }} +{{- if .Values.clients.env }} +{{ toYaml .Values.clients.env }} +{{- end }} +{{- end -}} + +{{- define "windowsExporter.validatePathPrefix" -}} +{{- if .Values.global.cattle.rkeWindowsPathPrefix -}} +{{- $prefixPath := (.Values.global.cattle.rkeWindowsPathPrefix | replace "/" "\\") -}} +{{- if (not (hasSuffix "\\" $prefixPath)) -}} +{{- fail (printf ".Values.global.cattle.rkeWindowsPathPrefix must end in '/' or '\\', found %s" $prefixPath) -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "windowsExporter.renamedMetrics" -}} +{{- $renamed := dict -}} +{{/* v0.15.0 */}} +{{- $_ := set $renamed "windows_mssql_transactions_active_total" "windows_mssql_transactions_active" -}} +{{/* v0.16.0 */}} +{{- $_ 
:= set $renamed "windows_adfs_ad_login_connection_failures" "windows_adfs_ad_login_connection_failures_total" -}} +{{- $_ := set $renamed "windows_adfs_certificate_authentications" "windows_adfs_certificate_authentications_total" -}} +{{- $_ := set $renamed "windows_adfs_device_authentications" "windows_adfs_device_authentications_total" -}} +{{- $_ := set $renamed "windows_adfs_extranet_account_lockouts" "windows_adfs_extranet_account_lockouts_total" -}} +{{- $_ := set $renamed "windows_adfs_federated_authentications" "windows_adfs_federated_authentications_total" -}} +{{- $_ := set $renamed "windows_adfs_passport_authentications" "windows_adfs_passport_authentications_total" -}} +{{- $_ := set $renamed "windows_adfs_password_change_failed" "windows_adfs_password_change_failed_total" -}} +{{- $_ := set $renamed "windows_adfs_password_change_succeeded" "windows_adfs_password_change_succeeded_total" -}} +{{- $_ := set $renamed "windows_adfs_token_requests" "windows_adfs_token_requests_total" -}} +{{- $_ := set $renamed "windows_adfs_windows_integrated_authentications" "windows_adfs_windows_integrated_authentications_total" -}} +{{- $_ := set $renamed "windows_net_packets_outbound_errors" "windows_net_packets_outbound_errors_total" -}} +{{- $_ := set $renamed "windows_net_packets_received_discarded" "windows_net_packets_received_discarded_total" -}} +{{- $_ := set $renamed "windows_net_packets_received_errors" "windows_net_packets_received_errors_total" -}} +{{- $_ := set $renamed "windows_net_packets_received_total" "windows_net_packets_received_total_total" -}} +{{- $_ := set $renamed "windows_net_packets_received_unknown" "windows_net_packets_received_unknown_total" -}} +{{- $_ := set $renamed "windows_dns_memory_used_bytes_total" "windows_dns_memory_used_bytes" -}} +{{- $renamed | toJson -}} +{{- end -}} + +{{- define "windowsExporter.renamedMetricsRelabeling" -}} +{{- range $original, $new := (include "windowsExporter.renamedMetrics" . 
| fromJson) -}} +- sourceLabels: [__name__] + regex: {{ $original }} + replacement: '{{ $new }}' + targetLabel: __name__ +{{ end -}} +{{- end -}} + +{{- define "windowsExporter.renamedMetricsRules" -}} +{{- range $original, $new := (include "windowsExporter.renamedMetrics" . | fromJson) -}} +- record: {{ $original }} + expr: {{ $new }} +{{ end -}} +{{- end -}} diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/configmap.yaml b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/configmap.yaml similarity index 100% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/configmap.yaml rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/configmap.yaml diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/daemonset.yaml b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/daemonset.yaml similarity index 100% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/daemonset.yaml rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/daemonset.yaml diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/prometheusrule.yaml b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/prometheusrule.yaml new file mode 100644 index 00000000000..f31983122a6 --- /dev/null +++ b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/prometheusrule.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.prometheusRule .Values.clients }}{{- if and .Values.prometheusRule.enabled .Values.clients.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + labels: {{ include "windowsExporter.labels" . | nindent 4 }} + name: {{ template "windowsExporter.name" . }} + namespace: {{ template "windowsExporter.namespace" . 
}} +spec: + groups: + - name: windows-exporter-relabel.rules + rules: +{{- include "windowsExporter.renamedMetricsRules" . | nindent 4 -}} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/rbac.yaml b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/rbac.yaml similarity index 100% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/rbac.yaml rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/rbac.yaml diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/service.yaml b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/service.yaml similarity index 100% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/service.yaml rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/service.yaml diff --git a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/servicemonitor.yaml b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/servicemonitor.yaml similarity index 88% rename from charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/servicemonitor.yaml rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/servicemonitor.yaml index a2c2f0b54e2..26ece9b05a2 100644 --- a/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.0/templates/servicemonitor.yaml +++ b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/templates/servicemonitor.yaml @@ -17,14 +17,11 @@ spec: endpoints: - port: windows-metrics metricRelabelings: +{{- include "windowsExporter.renamedMetricsRelabeling" . 
| nindent 4 -}} - sourceLabels: [__name__] regex: 'wmi_(.*)' replacement: 'windows_$1' targetLabel: __name__ - - sourceLabels: [__name__] - regex: windows_mssql_transactions_active_total - replacement: 'windows_mssql_transactions_active' - targetLabel: __name__ - sourceLabels: [volume, nic] regex: (.*);(.*) separator: '' diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/values.yaml b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/values.yaml similarity index 91% rename from charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/values.yaml rename to charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/values.yaml index 6130890bd83..aa1fd197355 100644 --- a/charts/rancher-monitoring/rancher-monitoring/14.5.101+up14.5.0/charts/windowsExporter/values.yaml +++ b/charts/rancher-windows-exporter/rancher-windows-exporter/0.1.100/values.yaml @@ -13,6 +13,10 @@ global: serviceMonitor: enabled: true +# Configure PrometheusRule that renames existing metrics +prometheusRule: + enabled: true + ## Components scraping metrics from Windows nodes ## clients: @@ -21,7 +25,7 @@ clients: port: 9796 image: repository: rancher/windows_exporter-package - tag: v0.0.1 + tag: v0.0.2 os: "windows" # Specify the IP addresses of nodes that you want to collect metrics from