@@ -132,15 +134,18 @@ const SalesUploadPage = (props) => {
);
};
-SalesUploadPage.defaultProps = {
+CreditRequestsUploadPage.defaultProps = {
errorMessage: '',
+ icbcDate: '',
};
-SalesUploadPage.propTypes = {
+CreditRequestsUploadPage.propTypes = {
errorMessage: PropTypes.string,
files: PropTypes.arrayOf(PropTypes.shape()).isRequired,
+ icbcDate: PropTypes.string,
+ setErrorMessage: PropTypes.func.isRequired,
setUploadFiles: PropTypes.func.isRequired,
upload: PropTypes.func.isRequired,
};
-export default SalesUploadPage;
+export default CreditRequestsUploadPage;
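For context, a minimal sketch of how a parent might wire the renamed component's props. Only the prop names and their required/optional status come from the `propTypes`/`defaultProps` above; the container, state shape, import path, and upload handler are illustrative assumptions.

```jsx
// Hypothetical container for the renamed component; only the prop contract
// (errorMessage, files, icbcDate, setErrorMessage, setUploadFiles, upload)
// comes from the propTypes in the hunk above.
import React, { useState } from 'react';
import CreditRequestsUploadPage from './CreditRequestsUploadPage'; // assumed path

const CreditRequestsUploadContainer = () => {
  const [files, setUploadFiles] = useState([]); // files: arrayOf(shape), required
  const [errorMessage, setErrorMessage] = useState(''); // optional, defaults to ''

  const upload = () => {
    // Placeholder handler: a real one would POST the files and surface
    // failures through setErrorMessage.
    if (files.length === 0) {
      setErrorMessage('No files selected.');
    }
  };

  return (
    <CreditRequestsUploadPage
      errorMessage={errorMessage}
      files={files}
      icbcDate="" // optional, defaults to ''
      setErrorMessage={setErrorMessage}
      setUploadFiles={setUploadFiles}
      upload={upload}
    />
  );
};

export default CreditRequestsUploadContainer;
```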
From 313737062823bbc574b168d7496cf9e7d248f5b1 Mon Sep 17 00:00:00 2001
From: Kuan Fan <31664961+kuanfandevops@users.noreply.github.com>
Date: Thu, 7 Jan 2021 12:57:53 -0800
Subject: [PATCH 5/7] Remove openshift v3 and add upload process pod for
testing (#488)
* remove openshift v3
* add separate upload pod
---
.jenkins-v3/.pipeline/.nvmrc | 2 -
.jenkins-v3/.pipeline/build.js | 5 -
.jenkins-v3/.pipeline/clean.js | 5 -
.jenkins-v3/.pipeline/deploy.js | 5 -
.jenkins-v3/.pipeline/lib/build.js | 38 -
.jenkins-v3/.pipeline/lib/clean.js | 77 --
.jenkins-v3/.pipeline/lib/config.js | 20 -
.jenkins-v3/.pipeline/lib/deploy.js | 44 -
.jenkins-v3/.pipeline/npmw | 12 -
.jenkins-v3/.pipeline/package-lock.json | 89 --
.jenkins-v3/.pipeline/package.json | 23 -
.jenkins-v3/Jenkinsfile | 57 -
.jenkins-v3/README.md | 43 -
.jenkins-v3/docker/Dockerfile | 11 -
.../contrib/jenkins/configuration/config.xml | 110 --
.../configuration/jobs/_jenkins/config.xml | 64 --
.../jobs/zeva-release-pipelines/config.xml | 22 -
.../jobs/develop-pipeline/config.xml | 36 -
.../configuration/jobs/zeva/config.xml | 64 --
.../scripts.groovy.d/on-gh-event.groovy | 133 ---
.jenkins-v3/openshift/build-master.yaml | 81 --
.jenkins-v3/openshift/build-slave.yaml | 110 --
.jenkins-v3/openshift/deploy-master.yaml | 297 ------
.jenkins-v3/openshift/deploy-prereq.yaml | 31 -
.jenkins-v3/openshift/deploy-slave.yaml | 164 ---
.jenkins-v3/openshift/secrets.json | 50 -
.pipeline-v3/.nvmrc | 1 -
.pipeline-v3/build.js | 5 -
.pipeline-v3/clean.js | 5 -
.pipeline-v3/deploy-unittest.js | 5 -
.pipeline-v3/deploy.js | 7 -
.pipeline-v3/lib/build.js | 57 -
.pipeline-v3/lib/clean.js | 134 ---
.pipeline-v3/lib/config.js | 55 -
.pipeline-v3/lib/deploy-unittest.js | 67 --
.pipeline-v3/lib/deploy.js | 210 ----
.pipeline-v3/lib/keycloak.js | 137 ---
.pipeline-v3/npmw | 12 -
.pipeline-v3/package-lock.json | 988 ------------------
.pipeline-v3/package.json | 27 -
.yo-rc.json-v3 | 54 -
Jenkinsfile-v3 | 67 --
openshift-v3/README.md | 300 ------
openshift-v3/templates/README.md | 57 -
openshift-v3/templates/backend/README.md | 25 -
.../templates/backend/backend-autoscaler.yaml | 36 -
.../templates/backend/backend-bc.yaml | 95 --
.../templates/backend/backend-dc.yaml | 426 --------
.../backend/django-secret-template.yaml | 17 -
.../backend/email-service-secret.yaml | 26 -
.../backup-container-2.0.0/.gitattributes | 12 -
.../backup-container-2.0.0/.gitignore | 16 -
.../backup-container-2.0.0/CODE_OF_CONDUCT.md | 46 -
.../backup-container-2.0.0/CONTRIBUTING.md | 10 -
.../templates/backup-container-2.0.0/LICENSE | 201 ----
.../backup-container-2.0.0/README.md | 368 -------
.../backup-container-2.0.0/config/backup.conf | 52 -
.../backup-container-2.0.0/docker/Dockerfile | 42 -
.../docker/Dockerfile_Mongo | 42 -
.../docker/backup.config.utils | 485 ---------
.../docker/backup.container.utils | 57 -
.../docker/backup.file.utils | 233 -----
.../backup-container-2.0.0/docker/backup.ftp | 23 -
.../docker/backup.logging | 111 --
.../docker/backup.misc.utils | 30 -
.../docker/backup.mongo.plugin | 226 ----
.../docker/backup.null.plugin | 195 ----
.../docker/backup.postgres.plugin | 247 -----
.../docker/backup.server.utils | 39 -
.../docker/backup.settings | 55 -
.../backup-container-2.0.0/docker/backup.sh | 140 ---
.../docker/backup.usage | 133 ---
.../docker/backup.utils | 268 -----
.../docker/webhook-template.json | 6 -
.../backup-container-2.0.0/docs/ExampleLog.md | 62 --
.../docs/SampleRocketChatErrorMessage.png | Bin 18609 -> 0 bytes
.../docs/SampleRocketChatMessage.png | Bin 38568 -> 0 bytes
.../docs/TipsAndTricks.md | 75 --
.../openshift/templates/backup/README.md | 71 --
.../templates/backup/backup-build.json | 102 --
.../templates/backup/backup-deploy.json | 545 ----------
.../scripts/rocket.chat.integration.js | 50 -
openshift-v3/templates/config/README.md | 10 -
openshift-v3/templates/config/configmap.yaml | 86 --
openshift-v3/templates/frontend/README.md | 16 -
.../frontend/frontend-autoscaler.yaml | 36 -
.../templates/frontend/frontend-bc.yaml | 82 --
.../frontend/frontend-configmap.yaml | 37 -
.../templates/frontend/frontend-dc.yaml | 285 -----
openshift-v3/templates/jenkins/README.md | 13 -
.../templates/jenkins/jenkins-bc.yaml | 61 --
openshift-v3/templates/keycloak/README.md | 9 -
.../templates/keycloak/keycloak-secret.yaml | 37 -
openshift-v3/templates/maintenance/Caddyfile | 18 -
.../templates/maintenance/maintenance.html | 18 -
.../maintenance/openshift/maintenance-bc.yaml | 58 -
.../maintenance/openshift/maintenance-dc.yaml | 100 --
openshift-v3/templates/minio/README.md | 22 -
.../templates/minio/docker/Dockerfile | 33 -
.../templates/minio/docker/entrypoint.sh | 3 -
openshift-v3/templates/minio/minio-bc.yaml | 64 --
openshift-v3/templates/minio/minio-dc.yaml | 227 ----
.../templates/minio/minio-secret.yaml | 25 -
openshift-v3/templates/nagios/.kube/.empty | 0
openshift-v3/templates/nagios/Dockerfile | 49 -
openshift-v3/templates/nagios/Dockerfile-base | 11 -
openshift-v3/templates/nagios/README.md | 24 -
.../templates/nagios/apache2/apache2.conf | 52 -
.../apache2/mods-enabled/access_compat.load | 1 -
.../nagios/apache2/mods-enabled/alias.conf | 1 -
.../nagios/apache2/mods-enabled/alias.load | 1 -
.../apache2/mods-enabled/auth_basic.load | 1 -
.../apache2/mods-enabled/authn_core.load | 1 -
.../apache2/mods-enabled/authn_file.load | 1 -
.../apache2/mods-enabled/authz_core.load | 1 -
.../apache2/mods-enabled/authz_host.load | 1 -
.../apache2/mods-enabled/authz_user.load | 1 -
.../apache2/mods-enabled/autoindex.conf | 1 -
.../apache2/mods-enabled/autoindex.load | 1 -
.../nagios/apache2/mods-enabled/cgi.load | 1 -
.../nagios/apache2/mods-enabled/deflate.conf | 1 -
.../nagios/apache2/mods-enabled/deflate.load | 1 -
.../nagios/apache2/mods-enabled/dir.conf | 1 -
.../nagios/apache2/mods-enabled/dir.load | 1 -
.../nagios/apache2/mods-enabled/env.load | 1 -
.../nagios/apache2/mods-enabled/filter.load | 1 -
.../nagios/apache2/mods-enabled/mime.conf | 1 -
.../nagios/apache2/mods-enabled/mime.load | 1 -
.../apache2/mods-enabled/mpm_prefork.conf | 1 -
.../apache2/mods-enabled/mpm_prefork.load | 1 -
.../apache2/mods-enabled/negotiation.conf | 1 -
.../apache2/mods-enabled/negotiation.load | 1 -
.../nagios/apache2/mods-enabled/php5.conf | 1 -
.../nagios/apache2/mods-enabled/php5.load | 1 -
.../apache2/mods-enabled/reqtimeout.conf | 1 -
.../apache2/mods-enabled/reqtimeout.load | 1 -
.../nagios/apache2/mods-enabled/setenvif.conf | 1 -
.../nagios/apache2/mods-enabled/setenvif.load | 1 -
.../nagios/apache2/mods-enabled/status.conf | 1 -
.../nagios/apache2/mods-enabled/status.load | 1 -
.../templates/nagios/docroot/emptyFile.txt | 0
.../templates/nagios/nagios-base-bc.yaml | 38 -
openshift-v3/templates/nagios/nagios-bc.yaml | 65 --
openshift-v3/templates/nagios/nagios-dc.yaml | 271 -----
.../templates/nagios/nagios-secret.yaml | 20 -
openshift-v3/templates/nagios/nagios3/cgi.cfg | 377 -------
.../templates/nagios/nagios3/cleanup-cfg.sh | 16 -
.../templates/nagios/nagios3/commands.cfg | 91 --
.../nagios3/commands/check_diskusage.sh | 22 -
.../commands/check_email_connection.py | 16 -
.../commands/check_email_connection.sh | 10 -
.../nagios/nagios3/commands/check_host.sh | 4 -
.../commands/check_keycloak_connection.py | 62 --
.../commands/check_keycloak_connection.sh | 10 -
.../commands/check_minio_connection.py | 26 -
.../commands/check_minio_connection.sh | 12 -
.../nagios3/commands/check_patroni_health.sh | 22 -
.../commands/check_postgresql_liveness.py | 28 -
.../commands/check_postgresql_liveness.sh | 26 -
.../commands/check_rabbitmq_connection.py | 29 -
.../commands/check_rabbitmq_connection.sh | 10 -
.../nagios/nagios3/commands/check_replicas.sh | 21 -
.../nagios3/commands/notify_by_email.py | 31 -
.../nagios/nagios3/conf.d/contact-groups.cfg | 5 -
.../nagios/nagios3/conf.d/contacts.cfg | 27 -
.../nagios/nagios3/conf.d/host-groups-dev.cfg | 5 -
.../nagios3/conf.d/host-groups-prod.cfg | 6 -
.../nagios3/conf.d/host-groups-test.cfg | 5 -
.../nagios/nagios3/conf.d/hosts-dev.cfg | 95 --
.../nagios/nagios3/conf.d/hosts-prod.cfg | 95 --
.../nagios/nagios3/conf.d/hosts-test.cfg | 95 --
.../nagios3/conf.d/services-other-dev.cfg | 78 --
.../nagios3/conf.d/services-other-prod.cfg | 78 --
.../nagios3/conf.d/services-other-test.cfg | 78 --
.../nagios3/conf.d/services-replica-dev.cfg | 65 --
.../nagios3/conf.d/services-replica-prod.cfg | 65 --
.../nagios3/conf.d/services-replica-test.cfg | 65 --
.../nagios/nagios3/conf.d/timeperiods.cfg | 11 -
.../templates/nagios/nagios3/nagios.cfg | 111 --
.../templates/nagios/nagios3/resource.cfg | 31 -
.../nagios/nagios3/stylesheets/avail.css | 35 -
.../nagios3/stylesheets/checksanity.css | 27 -
.../nagios/nagios3/stylesheets/cmd.css | 14 -
.../nagios/nagios3/stylesheets/common.css | 370 -------
.../nagios/nagios3/stylesheets/config.css | 11 -
.../nagios/nagios3/stylesheets/extinfo.css | 84 --
.../nagios/nagios3/stylesheets/histogram.css | 10 -
.../nagios/nagios3/stylesheets/history.css | 8 -
.../nagios/nagios3/stylesheets/ministatus.css | 64 --
.../nagios3/stylesheets/notifications.css | 29 -
.../nagios/nagios3/stylesheets/outages.css | 15 -
.../nagios/nagios3/stylesheets/showlog.css | 8 -
.../nagios/nagios3/stylesheets/status.css | 88 --
.../nagios/nagios3/stylesheets/statusmap.css | 14 -
.../nagios/nagios3/stylesheets/summary.css | 30 -
.../nagios/nagios3/stylesheets/tac.css | 75 --
.../nagios/nagios3/stylesheets/trends.css | 8 -
.../nagios/supervisord/supervisord.conf | 21 -
openshift-v3/templates/nsp/README.MD | 48 -
openshift-v3/templates/nsp/aporeto-setup.md | 111 --
openshift-v3/templates/nsp/nsp-env.yaml | 88 --
openshift-v3/templates/nsp/nsp-tools.yaml | 44 -
.../templates/nsp/quickstart-nsp.yaml | 51 -
.../templates/patroni/.pipeline/build.js | 5 -
.../templates/patroni/.pipeline/clean.js | 4 -
.../templates/patroni/.pipeline/deploy.js | 6 -
.../templates/patroni/.pipeline/lib/build.js | 25 -
.../templates/patroni/.pipeline/lib/clean.js | 22 -
.../templates/patroni/.pipeline/lib/config.js | 13 -
.../templates/patroni/.pipeline/lib/deploy.js | 12 -
openshift-v3/templates/patroni/.pipeline/npmw | 4 -
.../templates/patroni/.pipeline/package.json | 27 -
.../templates/patroni/.pipeline/test/e2e.js | 173 ---
openshift-v3/templates/patroni/README.md | 40 -
openshift-v3/templates/patroni/build.yaml | 110 --
.../templates/patroni/deployment-prereq.yaml | 132 ---
.../templates/patroni/deployment.yaml | 336 ------
.../templates/patroni/docker/Dockerfile | 43 -
.../docker/contrib/root/usr/bin/entrypoint.sh | 57 -
.../usr/share/scripts/patroni/health_check.sh | 5 -
.../usr/share/scripts/patroni/post_init.sh | 17 -
.../templates/patroni/secret-template.yaml | 56 -
openshift-v3/templates/postgresql/create.sh | 3 -
.../templates/postgresql/nfs_storage.yaml | 46 -
.../postgresql/postgresql-dc-release.yaml | 181 ----
.../postgresql/postgresql-dc-unittest.yaml | 171 ---
.../templates/postgresql/postgresql-dc.yaml | 209 ----
.../python-backend/python-backend-bc.yaml | 55 -
.../python-backend/python-backend-dc.yaml | 171 ---
openshift-v3/templates/rabbitmq/README.md | 22 -
openshift-v3/templates/rabbitmq/create.sh | 13 -
.../templates/rabbitmq/docker/Dockerfile | 8 -
.../templates/rabbitmq/docker/policy.json | 67 --
.../templates/rabbitmq/rabbitmq-bc.yaml | 73 --
.../rabbitmq/rabbitmq-cluster-dc.yaml | 317 ------
.../rabbitmq-secret-configmap-only.yaml | 62 --
.../rabbitmq/rabbitmq-web-route.yaml | 53 -
.../templates/rabbitmq/secret-template.yaml | 27 -
.../rabbitmq/zeva-rabbitmq-secret.yaml | 21 -
openshift-v3/templates/redirect/Caddyfile | 18 -
openshift-v3/templates/redirect/migrated.html | 20 -
.../redirect/openshift/redirect-bc.yaml | 58 -
.../redirect/openshift/redirect-dc.yaml | 100 --
openshift-v3/templates/schemaspy/README.md | 5 -
.../templates/schemaspy/schemaspy-bc.yaml | 44 -
.../templates/schemaspy/schemaspy-dc.yaml | 192 ----
openshift-v3/templates/unittest/README.md | 7 -
.../unittest/postgresql-dc-unittest.yaml | 193 ----
.../unittest/zeva-postgresql-init.yaml | 32 -
openshift-v3/v4-migration/Readme.md | 62 --
openshift/templates/backend/README.md | 14 +
.../templates/backend/upload-process-dc.yaml | 95 +-
252 files changed, 59 insertions(+), 16498 deletions(-)
delete mode 100644 .jenkins-v3/.pipeline/.nvmrc
delete mode 100755 .jenkins-v3/.pipeline/build.js
delete mode 100755 .jenkins-v3/.pipeline/clean.js
delete mode 100755 .jenkins-v3/.pipeline/deploy.js
delete mode 100755 .jenkins-v3/.pipeline/lib/build.js
delete mode 100755 .jenkins-v3/.pipeline/lib/clean.js
delete mode 100644 .jenkins-v3/.pipeline/lib/config.js
delete mode 100755 .jenkins-v3/.pipeline/lib/deploy.js
delete mode 100755 .jenkins-v3/.pipeline/npmw
delete mode 100644 .jenkins-v3/.pipeline/package-lock.json
delete mode 100644 .jenkins-v3/.pipeline/package.json
delete mode 100644 .jenkins-v3/Jenkinsfile
delete mode 100644 .jenkins-v3/README.md
delete mode 100644 .jenkins-v3/docker/Dockerfile
delete mode 100644 .jenkins-v3/docker/contrib/jenkins/configuration/config.xml
delete mode 100644 .jenkins-v3/docker/contrib/jenkins/configuration/jobs/_jenkins/config.xml
delete mode 100644 .jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva-release-pipelines/config.xml
delete mode 100644 .jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva-release-pipelines/jobs/develop-pipeline/config.xml
delete mode 100644 .jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva/config.xml
delete mode 100644 .jenkins-v3/docker/contrib/jenkins/configuration/scripts.groovy.d/on-gh-event.groovy
delete mode 100644 .jenkins-v3/openshift/build-master.yaml
delete mode 100644 .jenkins-v3/openshift/build-slave.yaml
delete mode 100644 .jenkins-v3/openshift/deploy-master.yaml
delete mode 100644 .jenkins-v3/openshift/deploy-prereq.yaml
delete mode 100644 .jenkins-v3/openshift/deploy-slave.yaml
delete mode 100644 .jenkins-v3/openshift/secrets.json
delete mode 100644 .pipeline-v3/.nvmrc
delete mode 100755 .pipeline-v3/build.js
delete mode 100755 .pipeline-v3/clean.js
delete mode 100644 .pipeline-v3/deploy-unittest.js
delete mode 100755 .pipeline-v3/deploy.js
delete mode 100755 .pipeline-v3/lib/build.js
delete mode 100755 .pipeline-v3/lib/clean.js
delete mode 100644 .pipeline-v3/lib/config.js
delete mode 100644 .pipeline-v3/lib/deploy-unittest.js
delete mode 100755 .pipeline-v3/lib/deploy.js
delete mode 100644 .pipeline-v3/lib/keycloak.js
delete mode 100755 .pipeline-v3/npmw
delete mode 100644 .pipeline-v3/package-lock.json
delete mode 100644 .pipeline-v3/package.json
delete mode 100644 .yo-rc.json-v3
delete mode 100644 Jenkinsfile-v3
delete mode 100644 openshift-v3/README.md
delete mode 100644 openshift-v3/templates/README.md
delete mode 100644 openshift-v3/templates/backend/README.md
delete mode 100644 openshift-v3/templates/backend/backend-autoscaler.yaml
delete mode 100644 openshift-v3/templates/backend/backend-bc.yaml
delete mode 100644 openshift-v3/templates/backend/backend-dc.yaml
delete mode 100644 openshift-v3/templates/backend/django-secret-template.yaml
delete mode 100644 openshift-v3/templates/backend/email-service-secret.yaml
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/.gitattributes
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/.gitignore
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/CODE_OF_CONDUCT.md
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/CONTRIBUTING.md
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/LICENSE
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/README.md
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/config/backup.conf
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/Dockerfile
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/Dockerfile_Mongo
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.config.utils
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.container.utils
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.file.utils
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.ftp
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.logging
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.misc.utils
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.mongo.plugin
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.null.plugin
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.postgres.plugin
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.server.utils
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.settings
delete mode 100755 openshift-v3/templates/backup-container-2.0.0/docker/backup.sh
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.usage
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/backup.utils
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docker/webhook-template.json
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docs/ExampleLog.md
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docs/SampleRocketChatErrorMessage.png
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docs/SampleRocketChatMessage.png
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/docs/TipsAndTricks.md
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/openshift/templates/backup/README.md
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/openshift/templates/backup/backup-build.json
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/openshift/templates/backup/backup-deploy.json
delete mode 100644 openshift-v3/templates/backup-container-2.0.0/scripts/rocket.chat.integration.js
delete mode 100644 openshift-v3/templates/config/README.md
delete mode 100644 openshift-v3/templates/config/configmap.yaml
delete mode 100644 openshift-v3/templates/frontend/README.md
delete mode 100644 openshift-v3/templates/frontend/frontend-autoscaler.yaml
delete mode 100644 openshift-v3/templates/frontend/frontend-bc.yaml
delete mode 100644 openshift-v3/templates/frontend/frontend-configmap.yaml
delete mode 100644 openshift-v3/templates/frontend/frontend-dc.yaml
delete mode 100644 openshift-v3/templates/jenkins/README.md
delete mode 100644 openshift-v3/templates/jenkins/jenkins-bc.yaml
delete mode 100644 openshift-v3/templates/keycloak/README.md
delete mode 100644 openshift-v3/templates/keycloak/keycloak-secret.yaml
delete mode 100644 openshift-v3/templates/maintenance/Caddyfile
delete mode 100644 openshift-v3/templates/maintenance/maintenance.html
delete mode 100644 openshift-v3/templates/maintenance/openshift/maintenance-bc.yaml
delete mode 100644 openshift-v3/templates/maintenance/openshift/maintenance-dc.yaml
delete mode 100644 openshift-v3/templates/minio/README.md
delete mode 100644 openshift-v3/templates/minio/docker/Dockerfile
delete mode 100755 openshift-v3/templates/minio/docker/entrypoint.sh
delete mode 100644 openshift-v3/templates/minio/minio-bc.yaml
delete mode 100644 openshift-v3/templates/minio/minio-dc.yaml
delete mode 100644 openshift-v3/templates/minio/minio-secret.yaml
delete mode 100644 openshift-v3/templates/nagios/.kube/.empty
delete mode 100644 openshift-v3/templates/nagios/Dockerfile
delete mode 100644 openshift-v3/templates/nagios/Dockerfile-base
delete mode 100644 openshift-v3/templates/nagios/README.md
delete mode 100644 openshift-v3/templates/nagios/apache2/apache2.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/access_compat.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/alias.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/alias.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/auth_basic.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/authn_core.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/authn_file.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/authz_core.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/authz_host.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/authz_user.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/autoindex.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/autoindex.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/cgi.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/deflate.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/deflate.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/dir.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/dir.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/env.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/filter.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/mime.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/mime.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/mpm_prefork.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/mpm_prefork.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/negotiation.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/negotiation.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/php5.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/php5.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/reqtimeout.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/reqtimeout.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/setenvif.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/setenvif.load
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/status.conf
delete mode 120000 openshift-v3/templates/nagios/apache2/mods-enabled/status.load
delete mode 100644 openshift-v3/templates/nagios/docroot/emptyFile.txt
delete mode 100644 openshift-v3/templates/nagios/nagios-base-bc.yaml
delete mode 100644 openshift-v3/templates/nagios/nagios-bc.yaml
delete mode 100644 openshift-v3/templates/nagios/nagios-dc.yaml
delete mode 100644 openshift-v3/templates/nagios/nagios-secret.yaml
delete mode 100644 openshift-v3/templates/nagios/nagios3/cgi.cfg
delete mode 100755 openshift-v3/templates/nagios/nagios3/cleanup-cfg.sh
delete mode 100644 openshift-v3/templates/nagios/nagios3/commands.cfg
delete mode 100755 openshift-v3/templates/nagios/nagios3/commands/check_diskusage.sh
delete mode 100644 openshift-v3/templates/nagios/nagios3/commands/check_email_connection.py
delete mode 100755 openshift-v3/templates/nagios/nagios3/commands/check_email_connection.sh
delete mode 100755 openshift-v3/templates/nagios/nagios3/commands/check_host.sh
delete mode 100755 openshift-v3/templates/nagios/nagios3/commands/check_keycloak_connection.py
delete mode 100755 openshift-v3/templates/nagios/nagios3/commands/check_keycloak_connection.sh
delete mode 100644 openshift-v3/templates/nagios/nagios3/commands/check_minio_connection.py
delete mode 100755 openshift-v3/templates/nagios/nagios3/commands/check_minio_connection.sh
delete mode 100755 openshift-v3/templates/nagios/nagios3/commands/check_patroni_health.sh
delete mode 100644 openshift-v3/templates/nagios/nagios3/commands/check_postgresql_liveness.py
delete mode 100755 openshift-v3/templates/nagios/nagios3/commands/check_postgresql_liveness.sh
delete mode 100644 openshift-v3/templates/nagios/nagios3/commands/check_rabbitmq_connection.py
delete mode 100755 openshift-v3/templates/nagios/nagios3/commands/check_rabbitmq_connection.sh
delete mode 100755 openshift-v3/templates/nagios/nagios3/commands/check_replicas.sh
delete mode 100644 openshift-v3/templates/nagios/nagios3/commands/notify_by_email.py
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/contact-groups.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/contacts.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/host-groups-dev.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/host-groups-prod.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/host-groups-test.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/hosts-dev.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/hosts-prod.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/hosts-test.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/services-other-dev.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/services-other-prod.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/services-other-test.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/services-replica-dev.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/services-replica-prod.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/services-replica-test.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/conf.d/timeperiods.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/nagios.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/resource.cfg
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/avail.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/checksanity.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/cmd.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/common.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/config.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/extinfo.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/histogram.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/history.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/ministatus.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/notifications.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/outages.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/showlog.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/status.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/statusmap.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/summary.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/tac.css
delete mode 100644 openshift-v3/templates/nagios/nagios3/stylesheets/trends.css
delete mode 100644 openshift-v3/templates/nagios/supervisord/supervisord.conf
delete mode 100644 openshift-v3/templates/nsp/README.MD
delete mode 100644 openshift-v3/templates/nsp/aporeto-setup.md
delete mode 100644 openshift-v3/templates/nsp/nsp-env.yaml
delete mode 100644 openshift-v3/templates/nsp/nsp-tools.yaml
delete mode 100644 openshift-v3/templates/nsp/quickstart-nsp.yaml
delete mode 100755 openshift-v3/templates/patroni/.pipeline/build.js
delete mode 100755 openshift-v3/templates/patroni/.pipeline/clean.js
delete mode 100755 openshift-v3/templates/patroni/.pipeline/deploy.js
delete mode 100755 openshift-v3/templates/patroni/.pipeline/lib/build.js
delete mode 100755 openshift-v3/templates/patroni/.pipeline/lib/clean.js
delete mode 100644 openshift-v3/templates/patroni/.pipeline/lib/config.js
delete mode 100755 openshift-v3/templates/patroni/.pipeline/lib/deploy.js
delete mode 100755 openshift-v3/templates/patroni/.pipeline/npmw
delete mode 100644 openshift-v3/templates/patroni/.pipeline/package.json
delete mode 100644 openshift-v3/templates/patroni/.pipeline/test/e2e.js
delete mode 100644 openshift-v3/templates/patroni/README.md
delete mode 100644 openshift-v3/templates/patroni/build.yaml
delete mode 100644 openshift-v3/templates/patroni/deployment-prereq.yaml
delete mode 100644 openshift-v3/templates/patroni/deployment.yaml
delete mode 100644 openshift-v3/templates/patroni/docker/Dockerfile
delete mode 100755 openshift-v3/templates/patroni/docker/contrib/root/usr/bin/entrypoint.sh
delete mode 100755 openshift-v3/templates/patroni/docker/contrib/root/usr/share/scripts/patroni/health_check.sh
delete mode 100755 openshift-v3/templates/patroni/docker/contrib/root/usr/share/scripts/patroni/post_init.sh
delete mode 100644 openshift-v3/templates/patroni/secret-template.yaml
delete mode 100755 openshift-v3/templates/postgresql/create.sh
delete mode 100644 openshift-v3/templates/postgresql/nfs_storage.yaml
delete mode 100644 openshift-v3/templates/postgresql/postgresql-dc-release.yaml
delete mode 100644 openshift-v3/templates/postgresql/postgresql-dc-unittest.yaml
delete mode 100644 openshift-v3/templates/postgresql/postgresql-dc.yaml
delete mode 100644 openshift-v3/templates/python-backend/python-backend-bc.yaml
delete mode 100644 openshift-v3/templates/python-backend/python-backend-dc.yaml
delete mode 100644 openshift-v3/templates/rabbitmq/README.md
delete mode 100644 openshift-v3/templates/rabbitmq/create.sh
delete mode 100644 openshift-v3/templates/rabbitmq/docker/Dockerfile
delete mode 100644 openshift-v3/templates/rabbitmq/docker/policy.json
delete mode 100644 openshift-v3/templates/rabbitmq/rabbitmq-bc.yaml
delete mode 100644 openshift-v3/templates/rabbitmq/rabbitmq-cluster-dc.yaml
delete mode 100644 openshift-v3/templates/rabbitmq/rabbitmq-secret-configmap-only.yaml
delete mode 100644 openshift-v3/templates/rabbitmq/rabbitmq-web-route.yaml
delete mode 100644 openshift-v3/templates/rabbitmq/secret-template.yaml
delete mode 100644 openshift-v3/templates/rabbitmq/zeva-rabbitmq-secret.yaml
delete mode 100644 openshift-v3/templates/redirect/Caddyfile
delete mode 100644 openshift-v3/templates/redirect/migrated.html
delete mode 100644 openshift-v3/templates/redirect/openshift/redirect-bc.yaml
delete mode 100644 openshift-v3/templates/redirect/openshift/redirect-dc.yaml
delete mode 100644 openshift-v3/templates/schemaspy/README.md
delete mode 100644 openshift-v3/templates/schemaspy/schemaspy-bc.yaml
delete mode 100644 openshift-v3/templates/schemaspy/schemaspy-dc.yaml
delete mode 100644 openshift-v3/templates/unittest/README.md
delete mode 100644 openshift-v3/templates/unittest/postgresql-dc-unittest.yaml
delete mode 100644 openshift-v3/templates/unittest/zeva-postgresql-init.yaml
delete mode 100644 openshift-v3/v4-migration/Readme.md
rename openshift-v3/templates/unittest/backend-dc-unittest.yaml => openshift/templates/backend/upload-process-dc.yaml (83%)
diff --git a/.jenkins-v3/.pipeline/.nvmrc b/.jenkins-v3/.pipeline/.nvmrc
deleted file mode 100644
index 851c9ba3c..000000000
--- a/.jenkins-v3/.pipeline/.nvmrc
+++ /dev/null
@@ -1,2 +0,0 @@
-v10.15.2
-
diff --git a/.jenkins-v3/.pipeline/build.js b/.jenkins-v3/.pipeline/build.js
deleted file mode 100755
index 3ac899f86..000000000
--- a/.jenkins-v3/.pipeline/build.js
+++ /dev/null
@@ -1,5 +0,0 @@
-'use strict';
-const task = require('./lib/build.js')
-const settings = require('./lib/config.js')
-
-task(Object.assign(settings, { phase: 'build'}))
diff --git a/.jenkins-v3/.pipeline/clean.js b/.jenkins-v3/.pipeline/clean.js
deleted file mode 100755
index 42231d7ff..000000000
--- a/.jenkins-v3/.pipeline/clean.js
+++ /dev/null
@@ -1,5 +0,0 @@
-'use strict';
-const settings = require('./lib/config.js')
-const task = require('./lib/clean.js')
-
-task(Object.assign(settings, { phase: settings.options.env}));
diff --git a/.jenkins-v3/.pipeline/deploy.js b/.jenkins-v3/.pipeline/deploy.js
deleted file mode 100755
index 595509459..000000000
--- a/.jenkins-v3/.pipeline/deploy.js
+++ /dev/null
@@ -1,5 +0,0 @@
-'use strict';
-const settings = require('./lib/config.js')
-const task = require('./lib/deploy.js')
-
-task(Object.assign(settings, { phase: settings.options.env}));
diff --git a/.jenkins-v3/.pipeline/lib/build.js b/.jenkins-v3/.pipeline/lib/build.js
deleted file mode 100755
index 8d6d5d2d4..000000000
--- a/.jenkins-v3/.pipeline/lib/build.js
+++ /dev/null
@@ -1,38 +0,0 @@
-'use strict';
-const {OpenShiftClientX} = require('@bcgov/pipeline-cli')
-const path = require('path');
-
-module.exports = (settings)=>{
- const phases=settings.phases
- const options = settings.options
- const oc=new OpenShiftClientX(Object.assign({'namespace':phases.build.namespace}, options));
- const phase='build'
- var objects = []
-
- const templatesLocalBaseUrl =oc.toFileUrl(path.resolve(__dirname, '../../openshift'))
-
- objects.push(...oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/build-master.yaml`, {
- 'param':{
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'SOURCE_REPOSITORY_URL': oc.git.http_url,
- 'SOURCE_REPOSITORY_REF': oc.git.ref,
- 'SOURCE_IMAGE_STREAM_NAMESPACE': phases[phase].namespace,
- 'SOURCE_IMAGE_STREAM_TAG': 'bcgov-jenkins-basic:v2-20200303'
- }
- }));
-
- objects.push(...oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/build-slave.yaml`, {
- 'param':{
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'SOURCE_IMAGE_STREAM_TAG': `${phases[phase].name}:${phases[phase].tag}`,
- 'SLAVE_NAME':'main'
- }
- }));
-
- oc.applyRecommendedLabels(objects, phases[phase].name, phase, phases[phase].changeId, phases[phase].instance)
- oc.applyAndBuild(objects)
-}
diff --git a/.jenkins-v3/.pipeline/lib/clean.js b/.jenkins-v3/.pipeline/lib/clean.js
deleted file mode 100755
index b9cfe87f8..000000000
--- a/.jenkins-v3/.pipeline/lib/clean.js
+++ /dev/null
@@ -1,77 +0,0 @@
-"use strict";
-const { OpenShiftClientX } = require("@bcgov/pipeline-cli");
-
-const getTargetPhases = (env, phases) => {
- let target_phase = [];
- for (const phase in phases) {
- if (env.match(/^(all|transient)$/) && phases[phase].transient) {
- target_phase.push(phase);
- } else if (env === phase) {
- target_phase.push(phase);
- break;
- }
- }
-
- return target_phase;
-};
-
-module.exports = settings => {
- const phases = settings.phases;
- const options = settings.options;
- const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options));
- const target_phases = getTargetPhases(options.env, phases);
-
- target_phases.forEach(k => {
- if (phases.hasOwnProperty(k)) {
- const phase = phases[k];
-
- let buildConfigs = oc.get("bc", {
- selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`,
- namespace: phase.namespace,
- });
- buildConfigs.forEach(bc => {
- if (bc.spec.output.to.kind == "ImageStreamTag") {
- oc.delete([`ImageStreamTag/${bc.spec.output.to.name}`], {
- "ignore-not-found": "true",
- wait: "true",
- namespace: phase.namespace,
- });
- }
- });
-
- let deploymentConfigs = oc.get("dc", {
- selector: `app=${phase.instance},env-id=${phase.changeId},env-name=${k},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`,
- namespace: phase.namespace,
- });
- deploymentConfigs.forEach(dc => {
- dc.spec.triggers.forEach(trigger => {
- if (
- trigger.type == "ImageChange" &&
- trigger.imageChangeParams.from.kind == "ImageStreamTag"
- ) {
- oc.delete([`ImageStreamTag/${trigger.imageChangeParams.from.name}`], {
- "ignore-not-found": "true",
- wait: "true",
- namespace: phase.namespace,
- });
- }
- });
- });
-
- oc.raw("delete", ["all"], {
- selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`,
- wait: "true",
- namespace: phase.namespace,
- });
- oc.raw(
- "delete",
- ["pvc,Secret,configmap,endpoints,RoleBinding,role,ServiceAccount,Endpoints"],
- {
- selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`,
- wait: "true",
- namespace: phase.namespace,
- },
- );
- }
- });
-};
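A quick illustration of the selection logic in `getTargetPhases` above. The phase map here is hypothetical — this patch does not show which phases are flagged as transient — but the rules are taken from the function itself: `all` or `transient` selects every transient phase, while an exact name selects just that phase.

```js
// Hypothetical phase map for illustration; only the selection rules come
// from the removed getTargetPhases above.
const phases = {
  build: { transient: true },
  dev: { transient: true },
  prod: { transient: false },
};

// getTargetPhases('transient', phases) -> ['build', 'dev']
// getTargetPhases('all', phases)       -> ['build', 'dev']
// getTargetPhases('prod', phases)      -> ['prod']
```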
diff --git a/.jenkins-v3/.pipeline/lib/config.js b/.jenkins-v3/.pipeline/lib/config.js
deleted file mode 100644
index c1832b5ac..000000000
--- a/.jenkins-v3/.pipeline/lib/config.js
+++ /dev/null
@@ -1,20 +0,0 @@
-'use strict';
-const options= require('@bcgov/pipeline-cli').Util.parseArguments()
-const changeId = options.pr //aka pull-request
-const version = '1.0.0'
-const name = 'jenkins'
-const ocpName = 'pathfinder'
-
-const phases = {
- build: {namespace:'tbiwaq-tools' , name: `${name}`, phase: 'build' , changeId:changeId, suffix: `-build-${changeId}` , instance: `${name}-build-${changeId}` , version:`${version}-${changeId}`, tag:`build-${version}-${changeId}`, ocpName: `${ocpName}`},
- dev: {namespace:'tbiwaq-tools' , name: `${name}`, phase: 'dev' , changeId:changeId, suffix: `-dev-${changeId}` , instance: `${name}-dev-${changeId}` , version:`${version}-${changeId}`, tag:`dev-${version}-${changeId}`, ocpName: `${ocpName}`},
- prod: {namespace:'tbiwaq-tools' , name: `${name}`, phase: 'prod' , changeId:changeId, suffix: `-prod` , instance: `${name}-prod` , version:`${version}`, tag:`prod-${version}`, ocpName: `${ocpName}`},
-};
-
-// This callback forces the node process to exit as failure.
-process.on('unhandledRejection', (reason) => {
- console.log(reason);
- process.exit(1);
-});
-
-module.exports = exports = {phases, options};
\ No newline at end of file
diff --git a/.jenkins-v3/.pipeline/lib/deploy.js b/.jenkins-v3/.pipeline/lib/deploy.js
deleted file mode 100755
index f992d7d26..000000000
--- a/.jenkins-v3/.pipeline/lib/deploy.js
+++ /dev/null
@@ -1,44 +0,0 @@
-'use strict';
-const {OpenShiftClientX} = require('@bcgov/pipeline-cli')
-const path = require('path');
-
-module.exports = (settings)=>{
- const phases = settings.phases
- const options = settings.options
- const phase=options.env
- const changeId = phases[phase].changeId
- const oc=new OpenShiftClientX(Object.assign({'namespace':phases[phase].namespace}, options));
- var objects = []
-
- const templatesLocalBaseUrl =oc.toFileUrl(path.resolve(__dirname, '../../openshift'))
-
- objects.push(...oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/deploy-master.yaml`, {
- 'param':{
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'ENV_NAME': phases[phase].phase,
- 'ROUTE_HOST': `${phases[phase].name}${phases[phase].suffix}-${phases[phase].namespace}.${phases[phase].ocpName}.gov.bc.ca`
- }
- }))
-
- objects.push(...oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/deploy-slave.yaml`, {
- 'param':{
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'SLAVE_NAME': 'slave',
- 'SLAVE_LABELS': 'build deploy test ui-test',
- 'SLAVE_EXECUTORS': '3',
- 'CPU_REQUEST': '300m',
- 'CPU_LIMIT': '500m',
- 'MEMORY_REQUEST': '2Gi',
- 'MEMORY_LIMIT': '2Gi'
- }
- }))
-
- oc.applyRecommendedLabels(objects, phases[phase].name, phase, `${changeId}`, phases[phase].instance)
- oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag)
- oc.applyAndDeploy(objects, phases[phase].instance)
-
-}
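As a worked example of the `ROUTE_HOST` string assembled above, using the `dev` phase values from `.jenkins-v3/.pipeline/lib/config.js` earlier in this patch (the PR number `0` is an assumption for illustration):

```js
// Values taken from the dev phase in lib/config.js above; --pr=0 is assumed.
const changeId = 0;
const phase = {
  name: 'jenkins',
  suffix: `-dev-${changeId}`,
  namespace: 'tbiwaq-tools',
  ocpName: 'pathfinder',
};

const routeHost = `${phase.name}${phase.suffix}-${phase.namespace}.${phase.ocpName}.gov.bc.ca`;
console.log(routeHost); // jenkins-dev-0-tbiwaq-tools.pathfinder.gov.bc.ca
```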
diff --git a/.jenkins-v3/.pipeline/npmw b/.jenkins-v3/.pipeline/npmw
deleted file mode 100755
index 8b392c0f8..000000000
--- a/.jenkins-v3/.pipeline/npmw
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-set +x
-type -t nvm && nvm deactivate
-export NVM_DIR="$(git rev-parse --show-toplevel)/.nvm"
-if [ ! -f "$NVM_DIR/nvm.sh" ]; then
- mkdir -p "${NVM_DIR}"
- curl -sSL -o- https://raw.githubusercontent.com/creationix/nvm/v0.34.0/install.sh | bash &>/dev/null
-fi
-source "$NVM_DIR/nvm.sh" &>/dev/null
-METHOD=script nvm install --no-progress &>/dev/null
-nvm use &>/dev/null
-exec npm "$@" --max-old-space-size=8192
diff --git a/.jenkins-v3/.pipeline/package-lock.json b/.jenkins-v3/.pipeline/package-lock.json
deleted file mode 100644
index 10aadbae1..000000000
--- a/.jenkins-v3/.pipeline/package-lock.json
+++ /dev/null
@@ -1,89 +0,0 @@
-{
- "name": "pipeline",
- "version": "1.0.0",
- "lockfileVersion": 1,
- "requires": true,
- "dependencies": {
- "@bcgov/pipeline-cli": {
- "version": "1.0.1-0",
- "resolved": "https://registry.npmjs.org/@bcgov/pipeline-cli/-/pipeline-cli-1.0.1-0.tgz",
- "integrity": "sha512-DXneptaJuG81Vo+GotZaS4M78uOVnocCCzte6UghOkO+Bt8EQ6xlPblITPXiNDCufO7gOnEmB4T/pyh1ZBnvcw==",
- "requires": {
- "debug": "^4.1.0",
- "lodash.isempty": "^4.0.1",
- "lodash.isfunction": "^3.0.9",
- "lodash.isplainobject": "^4.0.6",
- "lodash.isstring": "^4.0.1",
- "snakecase-keys": "^3.1.0"
- }
- },
- "debug": {
- "version": "4.1.1",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
- "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
- "requires": {
- "ms": "^2.1.1"
- }
- },
- "lodash.isempty": {
- "version": "4.4.0",
- "resolved": "https://registry.npmjs.org/lodash.isempty/-/lodash.isempty-4.4.0.tgz",
- "integrity": "sha1-b4bL7di+TsmHvpqvM8loTbGzHn4="
- },
- "lodash.isfunction": {
- "version": "3.0.9",
- "resolved": "https://registry.npmjs.org/lodash.isfunction/-/lodash.isfunction-3.0.9.tgz",
- "integrity": "sha512-AirXNj15uRIMMPihnkInB4i3NHeb4iBtNg9WRWuK2o31S+ePwwNmDPaTL3o7dTJ+VXNZim7rFs4rxN4YU1oUJw=="
- },
- "lodash.isplainobject": {
- "version": "4.0.6",
- "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
- "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs="
- },
- "lodash.isstring": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz",
- "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE="
- },
- "map-obj": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.1.0.tgz",
- "integrity": "sha512-glc9y00wgtwcDmp7GaE/0b0OnxpNJsVf3ael/An6Fe2Q51LLwN1er6sdomLRzz5h0+yMpiYLhWYF5R7HeqVd4g=="
- },
- "ms": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
- "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
- },
- "snakecase-keys": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/snakecase-keys/-/snakecase-keys-3.1.0.tgz",
- "integrity": "sha512-QM038drLbhdOY5HcRQVjO1ZJ1WR7yV5D5TIBzcOB/g3f5HURHhfpYEnvOyzXet8K+MQsgeIUA7O7vn90nAX6EA==",
- "requires": {
- "map-obj": "^4.0.0",
- "to-snake-case": "^1.0.0"
- }
- },
- "to-no-case": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/to-no-case/-/to-no-case-1.0.2.tgz",
- "integrity": "sha1-xyKQcWTvaxeBMsjmmTAhLRtKoWo="
- },
- "to-snake-case": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/to-snake-case/-/to-snake-case-1.0.0.tgz",
- "integrity": "sha1-znRpE4l5RgGah+Yu366upMYIq4w=",
- "requires": {
- "to-space-case": "^1.0.0"
- }
- },
- "to-space-case": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/to-space-case/-/to-space-case-1.0.0.tgz",
- "integrity": "sha1-sFLar7Gysp3HcM6gFj5ewOvJ/Bc=",
- "requires": {
- "to-no-case": "^1.0.0"
- }
- }
- }
-}
diff --git a/.jenkins-v3/.pipeline/package.json b/.jenkins-v3/.pipeline/package.json
deleted file mode 100644
index 03bfc0798..000000000
--- a/.jenkins-v3/.pipeline/package.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
- "name": "pipeline",
- "version": "1.0.0",
- "description": "This a pipeliene script",
- "engines": {
- "node": ">=8"
- },
- "scripts": {
- "build": "node build.js",
- "clean": "node clean.js",
- "deploy": "node deploy.js",
- "version": "echo \"node@$(node --version) ($(which node))\" && echo \"npm@$(npm --version) ($(which npm))\" && npm ls"
- },
- "repository": {
- "type": "git",
- "url": "git+https://github.com/bcgov/zeva.git"
- },
- "author": "",
- "license": "Apache-2.0",
- "dependencies": {
- "@bcgov/pipeline-cli": "^1.0.1-0"
- }
-}
diff --git a/.jenkins-v3/Jenkinsfile b/.jenkins-v3/Jenkinsfile
deleted file mode 100644
index 0a40c8b7d..000000000
--- a/.jenkins-v3/Jenkinsfile
+++ /dev/null
@@ -1,57 +0,0 @@
-pipeline {
- agent none
- options {
- disableResume()
- }
- stages {
- stage('Build') {
- agent { label 'build' }
- steps {
- script {
- def filesInThisCommitAsString = sh(script:"git diff --name-only HEAD~1..HEAD | grep '^.jenkins-v3/' || echo -n ''", returnStatus: false, returnStdout: true).trim()
- def hasChangesInPath = (filesInThisCommitAsString.length() > 0)
- echo "${filesInThisCommitAsString}"
- if (!currentBuild.rawBuild.getCauses()[0].toString().contains('UserIdCause') && !hasChangesInPath){
- currentBuild.rawBuild.delete()
- error("No changes detected in the path ('^.jenkins-v3/')")
- }
- }
- echo "Aborting all running jobs ..."
- script {
- abortAllPreviousBuildInProgress(currentBuild)
- }
- echo "BRANCH_NAME:${env.BRANCH_NAME}\nCHANGE_ID:${env.CHANGE_ID}\nCHANGE_TARGET:${env.CHANGE_TARGET}"
- echo "Building ..."
- sh "cd .jenkins-v3/.pipeline && ./npmw ci && ./npmw run build -- --pr=${CHANGE_ID}"
- }
- }
- stage('Deploy (DEV)') {
- agent { label 'deploy' }
- steps {
- echo "Deploying ..."
- sh "cd .jenkins-v3/.pipeline && ./npmw ci && DEBUG=info* ./npmw run deploy -- --pr=${CHANGE_ID} --env=dev"
- }
- }
- stage('Deploy (PROD)') {
- agent { label 'deploy' }
- input {
- message "Should we continue with deployment to PROD?"
- ok "Yes!"
- }
- steps {
- echo "Deploying ..."
- sh "cd .jenkins-v3/.pipeline && ./npmw ci && ./npmw run deploy -- --pr=${CHANGE_ID} --env=prod"
- }
- }
- stage('Acceptance') {
- agent { label 'deploy' }
- input {
- message "Should we continue with cleanup?"
- ok "Yes!"
- }
- steps {
- echo "Cleaning ..."
- }
- }
- }
-}
\ No newline at end of file
diff --git a/.jenkins-v3/README.md b/.jenkins-v3/README.md
deleted file mode 100644
index e338c2aba..000000000
--- a/.jenkins-v3/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Overview
-
-For running locally, it is recommended to use locally installed npm/node. The command snippets provided below assume you have npm/node installed. Also, the command lines are given as subshells (within parentheses) so that they work regardless of your current shell working directory, as long as it is within the git working directory.
-
-For running from a Jenkinsfile, it is recommended to replace `npm` with the provided `npmw`, as it will download and install node/npm using `nvm`.
-
-Before running in any of your projects, ensure that you have created the proper GitHub and Slave User secrets below:
-template.
-github
-template.-slave-user
-
-GitHub webhooks are only created during the PROD deployment.
-
-Windows users can just do the `cd` manually to the root folder of their repo and remove `$(git rev-parse --show-toplevel)/` from the commands below.
-
-# Update OCP Name
-Replace `pathfinder` with the proper OCP name in docker/contrib/jenkins/configuration/config.xml
-
-# Build
-```
-( cd "$(git rev-parse --show-toplevel)/.jenkins/.pipeline" && npm run build -- --pr=0 --dev-mode=true )
-```
-Where:
-`--pr=0` is used to set the pull request number to build from.
-`--dev-mode=true` is used to indicate that the build will actually take the files in the current working directory, as opposed to a fresh `git clone`
-
-# Deploy to DEV
-```
-( cd "$(git rev-parse --show-toplevel)/.jenkins/.pipeline" && npm run deploy -- --pr=0 --env=dev )
-```
-
-# Deploy to PROD
-```
-( cd "$(git rev-parse --show-toplevel)/.jenkins/.pipeline" && npm run deploy -- --pr=0 --env=prod )
-```
-
-# Clean
-The clean script can run against each persistent environment, starting from `build`.
-```
-( cd "$(git rev-parse --show-toplevel)/.jenkins/.pipeline" && npm run clean -- --pr=0 --env=build )
-( cd "$(git rev-parse --show-toplevel)/.jenkins/.pipeline" && npm run clean -- --pr=0 --env=dev )
-```
-
-*Warning*: Do *NOT* run against `test` or `prod`. It will cause *PERMANENT* deletion of all objects, including `PVC`s. Be warned!
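Each `npm run` target in the README above maps onto one of the tiny entry scripts removed earlier in this patch. A condensed sketch of that shared pattern, taken from the removed `build.js`, `clean.js`, and `deploy.js`:

```js
// Condensed from the removed entry scripts: each one merges the shared
// settings from lib/config.js ({ phases, options } built with pipeline-cli)
// with a phase, then hands the result to the matching task in lib/.
'use strict';
const settings = require('./lib/config.js');
const task = require('./lib/build.js'); // or ./lib/clean.js, ./lib/deploy.js

// build.js pins the phase to 'build'; clean.js and deploy.js use --env:
task(Object.assign(settings, { phase: 'build' }));
// task(Object.assign(settings, { phase: settings.options.env }));
```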
diff --git a/.jenkins-v3/docker/Dockerfile b/.jenkins-v3/docker/Dockerfile
deleted file mode 100644
index 1eec80a27..000000000
--- a/.jenkins-v3/docker/Dockerfile
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM jenkins/core/there
-
-USER 0
-COPY ./contrib/jenkins/configuration $JENKINS_REF_HOME
-
-RUN set -x && \
- chgrp -R 0 $JENKINS_REF_HOME && \
- chmod -R 644 $JENKINS_REF_HOME && \
- chmod -R g+rwX $JENKINS_REF_HOME
-
-USER 1001
diff --git a/.jenkins-v3/docker/contrib/jenkins/configuration/config.xml b/.jenkins-v3/docker/contrib/jenkins/configuration/config.xml
deleted file mode 100644
index 7128df567..000000000
--- a/.jenkins-v3/docker/contrib/jenkins/configuration/config.xml
+++ /dev/null
@@ -1,110 +0,0 @@
-
-
-
- GitHubHookRegisterProblemMonitor
- jenkins.security.QueueItemAuthenticatorMonitor
-
- 2.176.1
- RUNNING
- 0
- EXCLUSIVE
- true
-
- com.cloudbees.plugins.credentials.CredentialsProvider.Create:kuanfandevops-admin-edit-view
- com.cloudbees.plugins.credentials.CredentialsProvider.Delete:kuanfandevops-admin-edit-view
- com.cloudbees.plugins.credentials.CredentialsProvider.ManageDomains:kuanfandevops-admin-edit-view
- com.cloudbees.plugins.credentials.CredentialsProvider.Update:kuanfandevops-admin-edit-view
- com.cloudbees.plugins.credentials.CredentialsProvider.View:kuanfandevops-admin-edit-view
- hudson.model.Computer.Configure:kuanfandevops-admin-edit-view
- hudson.model.Computer.Connect:jenkins-slave
- hudson.model.Computer.Create:jenkins-slave
- hudson.model.Computer.Delete:kuanfandevops-admin-edit-view
- hudson.model.Hudson.Administer:kuanfandevops-admin-edit-view
- hudson.model.Hudson.Read:jenkins-slave
- hudson.model.Hudson.Read:kuanfandevops-admin-edit-view
- hudson.model.Hudson.RunScripts:kuanfandevops-admin-edit-view
- hudson.model.Item.Build:kuanfandevops-admin-edit-view
- hudson.model.Item.Cancel:kuanfandevops-admin-edit-view
- hudson.model.Item.Configure:kuanfandevops-admin-edit-view
- hudson.model.Item.Create:kuanfandevops-admin-edit-view
- hudson.model.Item.Delete:kuanfandevops-admin-edit-view
- hudson.model.Item.Discover:kuanfandevops-admin-edit-view
- hudson.model.Item.Read:kuanfandevops-admin-edit-view
- hudson.model.Item.Workspace:kuanfandevops-admin-edit-view
- hudson.model.Run.Delete:kuanfandevops-admin-edit-view
- hudson.model.Run.Update:kuanfandevops-admin-edit-view
- hudson.model.View.Configure:kuanfandevops-admin-edit-view
- hudson.model.View.Create:kuanfandevops-admin-edit-view
- hudson.model.View.Delete:kuanfandevops-admin-edit-view
- hudson.scm.SCM.Tag:kuanfandevops-admin-edit-view
-
-
- /run/secrets/kubernetes.io/serviceaccount
- jenkins-prod
- https://openshift.default.svc
- https://console.pathfinder.gov.bc.ca:8443
- system:serviceaccount:tbiwaq-tools:jenkins-prod
- eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJ0Yml3YXEtdG9vbHMiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlY3JldC5uYW1lIjoiamVua2lucy1wcm9kLXRva2VuLWhteHo5Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImplbmtpbnMtcHJvZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjEyZDJlYWNlLTJjMTItMTFlYS1hZGNkLTAwNTA1NjgzNDhjYyIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDp0Yml3YXEtdG9vbHM6amVua2lucy1wcm9kIn0.PJ7GysGdZdy3uPSLUmFRLUHZHpkocbPV08aXGeeIW5zWWlbwlWMHeTyiF3Tqo4ToYMhcXmqhFukEr6NNRtgxrVUsHuOx4i7OH8UFrW1TeXYgPBBUpfwXhOoIhhTA2Jl5lbWVvsPNNoB_f-SFe3cqcNC0sIctb2EwIl6CBV8JksjfWtxSXzJGkdBYFf9mvBftCq7QlEYVoMWBJ_goOd-j3ITHyo5klVFdmLN4i7GphWPcoFMEsxxEGhbvbG3NNiwA0J_43Ueo6po7V-UH9HNTuD38coqWJbK3PXsr9IUrlncVqj3pbWi3P9Ir_CInjInALpCQPiOupBZ7w2BfgiHdAA
- tbiwaq-tools
-
- https://console.pathfinder.gov.bc.ca:8443
- https://console.pathfinder.gov.bc.ca:8443/oauth/authorize
- https://console.pathfinder.gov.bc.ca:8443/oauth/token
-
-
-
- true
-
- ${JENKINS_HOME}/workspace/${ITEM_FULL_NAME}
- ${JENKINS_HOME}/builds/${ITEM_FULL_NAME}
-
-
-
-
-
-
- openshift
-
-
-
- false
- false
- false
- http://jenkins-prod.tbiwaq-tools.svc:8080
- jenkins-prod.tbiwaq-tools.svc:50000
- 10
- 5
- 5
- 15
- false
- 32
- 600
-
-
-
- 5
- 0
-
-
-
- all
- false
- false
-
-
-
- all
- 50000
-
- JNLP-connect
- JNLP2-connect
- JNLP3-connect
-
-
-
- true
-
-
-
- true
-
diff --git a/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/_jenkins/config.xml b/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/_jenkins/config.xml
deleted file mode 100644
index b2e832507..000000000
--- a/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/_jenkins/config.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- false
-
-
-
-
-
-
- true
- -1
- -1
-
-
- false
-
-
-
-
-
-
-
-
-
-
-
-
-
- .jenkins-v3/Jenkinsfile
-
-
\ No newline at end of file
diff --git a/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva-release-pipelines/config.xml b/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva-release-pipelines/config.xml
deleted file mode 100644
index b97a091e6..000000000
--- a/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva-release-pipelines/config.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-
-
-
-
-
- All
- false
- false
-
-
-
-
-
-
-
- false
-
-
-
-
diff --git a/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva-release-pipelines/jobs/develop-pipeline/config.xml b/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva-release-pipelines/jobs/develop-pipeline/config.xml
deleted file mode 100644
index 8a1237a48..000000000
--- a/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva-release-pipelines/jobs/develop-pipeline/config.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
-
-
- false
-
-
-
- false
- false
-
-
-
-
- 2
-
-
- https://github.com/bcgov/zeva.git
- github-account
-
-
-
-
- release-pipeline
-
-
- false
-
-
-
- openshift/pipelines/Jenkinsfile-develop
- false
-
-
- false
-
diff --git a/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva/config.xml b/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva/config.xml
deleted file mode 100644
index cb03dd84e..000000000
--- a/.jenkins-v3/docker/contrib/jenkins/configuration/jobs/zeva/config.xml
+++ /dev/null
@@ -1,64 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- false
-
-
-
-
-
-
- true
- -1
- -1
-
-
- false
-
-
-
-
-
-
-
-
-
-
-
-
-
- Jenkinsfile-v3
-
-
\ No newline at end of file
diff --git a/.jenkins-v3/docker/contrib/jenkins/configuration/scripts.groovy.d/on-gh-event.groovy b/.jenkins-v3/docker/contrib/jenkins/configuration/scripts.groovy.d/on-gh-event.groovy
deleted file mode 100644
index 3c912bb4f..000000000
--- a/.jenkins-v3/docker/contrib/jenkins/configuration/scripts.groovy.d/on-gh-event.groovy
+++ /dev/null
@@ -1,133 +0,0 @@
-import groovy.json.*
-
-class OnGhEvent extends Script {
-
-static Map exec(List args, File workingDirectory=null, Appendable stdout=null, Appendable stderr=null, Closure stdin=null){
- ProcessBuilder builder = new ProcessBuilder(args as String[])
- if (stderr ==null){
- builder.redirectErrorStream(true)
- }
- if (workingDirectory!=null){
- builder.directory(workingDirectory)
- }
- def proc = builder.start()
-
- if (stdin!=null) {
- OutputStream out = proc.getOutputStream();
- stdin(out)
- out.flush();
- out.close();
- }
-
- if (stdout == null ){
- stdout = new StringBuffer()
- }
-
- proc.waitForProcessOutput(stdout, stderr)
- int exitValue= proc.exitValue()
-
- Map ret = ['out': stdout, 'err': stderr, 'status':exitValue, 'cmd':args]
-
- return ret
-}
-
- def run() {
- String ghPayload = build.buildVariableResolver.resolve("payload")
- String ghEventType = build.buildVariableResolver.resolve("x_github_event")
- String buildNumber = build.getNumber()
- String fullName = build.getProject().getFullName()
-
- File workDir = new File("/tmp/jenkins/on-gh-event/${fullName}/${buildNumber}")
- try{
- if ("pull_request" == ghEventType){
- def payload = new JsonSlurper().parseText(ghPayload)
- if ("closed" == payload.action){
- File gitWorkDir = workDir
- def ghRepo=com.cloudbees.jenkins.GitHubRepositoryName.create(payload.repository.clone_url).resolveOne()
- boolean isFromCollaborator=ghRepo.root.retrieve().asHttpStatusCode(ghRepo.getApiTailUrl("collaborators/${payload.pull_request.user.login}")) == 204
- String cloneUrl = payload.repository.clone_url
- String sourceBranch = isFromCollaborator?"refs/pull/${payload.number}/head":"refs/heads/${payload.pull_request.base.ref}"
- println "Is Collaborator:${isFromCollaborator} (${payload.pull_request.user.login})"
- println "Clone Url:${cloneUrl}"
- println "Checkout Branch:${sourceBranch}"
-
- println exec(['mkdir', '-p', gitWorkDir.getAbsolutePath()])
- println exec(['rm', '-rf', gitWorkDir.getAbsolutePath()])
- println exec(['git', 'init', gitWorkDir.getAbsolutePath()])
- println exec(['git', 'remote', 'add', 'origin', payload.repository.clone_url], gitWorkDir)
- println exec(['git', 'fetch', '--no-tags', payload.repository.clone_url, "+${sourceBranch}:PR-${payload.number}"], gitWorkDir)
- println exec(['git', 'checkout', "PR-${payload.number}"] , gitWorkDir)
-
- def pipelines = new FileNameFinder().getFileNames(gitWorkDir.getAbsolutePath(), '**/.pipeline-v3/package.json')
- pipelines.each {
- def pipelineWorkDir = new File(it).getParentFile()
- println exec(['./npmw', 'ci'], pipelineWorkDir)
- println exec(['./npmw', 'run', 'clean' ,'--' ,"--pr=${payload.number}", '--env=transient'], pipelineWorkDir)
- }
- }
- }else if ("issue_comment" == ghEventType){
- def payload = new JsonSlurper().parseText(ghPayload)
- if ("created" == payload.action && payload.issue.pull_request !=null ){
- String comment = payload.comment.body.trim()
-
- //OWNER or COLLABORATOR
- //https://developer.github.com/v4/enum/commentauthorassociation/
- String commentAuthorAssociation = payload.comment.author_association
- if (comment.charAt(0) == '/'){
- println "command: ${comment}"
- String jobName= payload.repository.name
- String jobPRName = payload.repository.full_name
-
- List projects = jenkins.model.Jenkins.instance.getAllItems(org.jenkinsci.plugins.workflow.multibranch.WorkflowMultiBranchProject.class).findAll {
- def scmSource=it.getSCMSources()[0]
- return payload.repository.owner.login.equalsIgnoreCase(scmSource.getRepoOwner()) && payload.repository.name.equalsIgnoreCase(scmSource.getRepository())
- }
- List branchProjects = []
- projects.each {
- def branchProject = it.getItem("PR-${payload.issue.number}")
- if (branchProject!=null){
- branchProjects.add(branchProject)
- }
- }
-
- if (comment == '/restart' && (commentAuthorAssociation == 'OWNER' || commentAuthorAssociation == 'COLLABORATOR')){
- //
- branchProjects.each {
- def targetProject=it
- def cause = new hudson.model.Cause.RemoteCause('github.com', "Pull Request Command By '${payload.comment.user.login}'")
- targetProject.scheduleBuild(0, cause)
- }
- }else if (comment == '/approve' && (commentAuthorAssociation == 'OWNER' || commentAuthorAssociation == 'COLLABORATOR')){
- if (branchProjects.size() > 0){
- branchProjects.each { targetJob ->
- if (targetJob.getLastBuild()){
- hudson.security.ACL.impersonate(hudson.security.ACL.SYSTEM, {
- for (org.jenkinsci.plugins.workflow.support.steps.input.InputAction inputAction : targetJob.getLastBuild().getActions(org.jenkinsci.plugins.workflow.support.steps.input.InputAction.class)){
- for (org.jenkinsci.plugins.workflow.support.steps.input.InputStepExecution inputStep:inputAction.getExecutions()){
- if (!inputStep.isSettled()){
- println inputStep.proceed(null)
- }
- }
- }
- } as Runnable )
- }
- }
- }else{
- println "There is no project or build associated with ${payload.issue.pull_request.html_url}"
- }
- }
- }
- }
- }
- }finally{
- exec(['rm', '-rf', workDir.getAbsolutePath()])
- }
-
-
- return null;
- } //end run
-
- static void main(String[] args) {
- org.codehaus.groovy.runtime.InvokerHelper.runScript(OnGhEvent, args)
- }
-}
\ No newline at end of file
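The script above is the v3 Jenkins ChatOps hook: a closed pull request triggers a transient-environment clean via the repo's .pipeline-v3 scripts, while issue comments beginning with "/" are treated as commands ("/restart" reschedules the PR builds, "/approve" settles any pending input steps), both gated on the commenter being an OWNER or COLLABORATOR. Below is a minimal JavaScript sketch of the same dispatch logic, assuming an already-parsed webhook payload; the handler names (restartBuilds, approvePendingInputs) are hypothetical stand-ins for the Jenkins API calls the Groovy script makes.

    // Sketch only: dispatches a GitHub issue_comment payload the way
    // on-gh-event.groovy does; restartBuilds/approvePendingInputs are
    // hypothetical callbacks, not a real Jenkins API.
    function onIssueComment(payload, handlers) {
      if (payload.action !== 'created' || !payload.issue.pull_request) return;
      const comment = payload.comment.body.trim();
      if (comment.charAt(0) !== '/') return; // not a slash command
      const assoc = payload.comment.author_association;
      if (assoc !== 'OWNER' && assoc !== 'COLLABORATOR') return; // untrusted commenter
      if (comment === '/restart') {
        handlers.restartBuilds(payload.issue.number);
      } else if (comment === '/approve') {
        handlers.approvePendingInputs(payload.issue.number);
      }
    }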
diff --git a/.jenkins-v3/openshift/build-master.yaml b/.jenkins-v3/openshift/build-master.yaml
deleted file mode 100644
index 2f562121d..000000000
--- a/.jenkins-v3/openshift/build-master.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: jenkins
-objects:
-- apiVersion: v1
- kind: ImageStream
- metadata:
- creationTimestamp: null
- labels:
- shared: "true"
- name: ${NAME}
- spec:
- lookupPolicy:
- local: false
-- apiVersion: v1
- kind: BuildConfig
- metadata:
- creationTimestamp: null
- name: ${NAME}${SUFFIX}
- spec:
- failedBuildsHistoryLimit: 2
- output:
- to:
- kind: ImageStreamTag
- name: ${NAME}:${VERSION}
- postCommit: {}
- resources:
- limits:
- cpu: 2000m
- memory: 4Gi
- requests:
- cpu: 500m
- memory: 1Gi
- runPolicy: SerialLatestOnly
- source:
- contextDir: .jenkins-v3/docker
- git:
- ref: ${SOURCE_REPOSITORY_REF}
- uri: ${SOURCE_REPOSITORY_URL}
- type: Git
- strategy:
- dockerStrategy:
- from:
- kind: ImageStreamTag
- name: ${SOURCE_IMAGE_STREAM_TAG}
- namespace: ${SOURCE_IMAGE_STREAM_NAMESPACE}
- type: Docker
- successfulBuildsHistoryLimit: 2
- triggers:
- - type: ConfigChange
- - imageChange: {}
- type: ImageChange
-parameters:
-- description: A name used for all objects
- displayName: Name
- name: NAME
- required: true
- value: jenkins
-- description: A name suffix used for all objects
- displayName: Suffix
- name: SUFFIX
- required: false
- value: ""
-- description: A version used for the image tags
- displayName: version
- name: VERSION
- required: true
- value: v1.0.0
-- name: SOURCE_IMAGE_STREAM_NAMESPACE
- required: true
- value: bcgov
-- name: SOURCE_IMAGE_STREAM_TAG
- required: true
- value: jenkins-basic:v2-latest
-- name: SOURCE_REPOSITORY_URL
- required: true
-- name: SOURCE_REPOSITORY_REF
- required: false
- value: tools
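The template above only defines the ImageStream and BuildConfig; parameters are injected when it is processed. A hedged sketch of how such a template would be processed and applied with the same @bcgov/pipeline-cli API that the deleted .pipeline-v3 scripts later in this patch use (parameter values here are illustrative, not taken from a real run):

    // Illustrative only; mirrors the processDeploymentTemplate/applyAndBuild
    // pattern from .pipeline-v3/lib/build.js further down in this patch.
    const path = require('path');
    const { OpenShiftClientX } = require('@bcgov/pipeline-cli');
    const oc = new OpenShiftClientX({ namespace: 'tbiwaq-tools' });
    const objects = oc.processDeploymentTemplate(
      oc.toFileUrl(path.resolve(__dirname, 'build-master.yaml')),
      { param: { NAME: 'jenkins', VERSION: 'v1.0.0',
                 SOURCE_REPOSITORY_URL: 'https://github.com/bcgov/zeva.git' } }
    );
    oc.applyAndBuild(objects);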
diff --git a/.jenkins-v3/openshift/build-slave.yaml b/.jenkins-v3/openshift/build-slave.yaml
deleted file mode 100644
index 25fb7a4df..000000000
--- a/.jenkins-v3/openshift/build-slave.yaml
+++ /dev/null
@@ -1,110 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: jenkins
-objects:
- - apiVersion: v1
- kind: ImageStream
- metadata:
- creationTimestamp: null
- labels:
- shared: "true"
- name: ${NAME}-slave-${SLAVE_NAME}
- spec:
- lookupPolicy:
- local: false
- - apiVersion: v1
- kind: BuildConfig
- metadata:
- creationTimestamp: null
- name: ${NAME}-slave-${SLAVE_NAME}${SUFFIX}
- spec:
- failedBuildsHistoryLimit: 2
- output:
- to:
- kind: ImageStreamTag
- name: ${NAME}-slave-${SLAVE_NAME}:${VERSION}
- postCommit: {}
- resources:
- limits:
- cpu: "1"
- memory: 1Gi
- requests:
- cpu: "1"
- memory: 1Gi
- runPolicy: SerialLatestOnly
- source:
- # contextDir: ${SOURCE_CONTEXT_DIR}
- # git:
- # ref: ${SOURCE_GIT_REF}
- # uri: ${SOURCE_GIT_URL}
- # type: Git
- dockerfile: |
- FROM BuildConfig
- USER 0
- RUN fix_permission(){ while [[ $# > 0 ]] ; do chgrp -R 0 "$1" && chmod -R g=u "$1"; shift; done } && \
- set -x && \
- mkdir /opt/flywaydb && \
- curl -sSL https://repo1.maven.org/maven2/org/flywaydb/flyway-commandline/5.2.4/flyway-commandline-5.2.4-linux-x64.tar.gz | tar xvz --strip-components=1 -C /opt/flywaydb && \
- ln -s /opt/flywaydb/flyway /usr/local/bin && \
- mkdir /opt/liquibase && \
- curl -sSL https://github.com/liquibase/liquibase/releases/download/liquibase-parent-3.6.3/liquibase-3.6.3-bin.tar.gz | tar xvz -C /opt/liquibase --exclude=sdk --exclude=liquibase.bat && \
- ln -s /opt/liquibase/liquibase /usr/local/bin && \
- curl -sSL -o /tmp/sonnar-scanner-cli.zip https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-3.3.0.1492-linux.zip && \
- unzip /tmp/sonnar-scanner-cli.zip -d /tmp/sonnar-scanner-cli && \
- mv /tmp/sonnar-scanner-cli/sonar-scanner-3.3.0.1492-linux /opt/sonnar-scanner && \
- ln -s /opt/sonnar-scanner/bin/sonar-scanner /usr/local/bin && \
- rm -rf /tmp/sonnar-scanner-cli.zip && \
- rm -rf /tmp/sonnar-scanner-cli && \
- fix_permission '/opt/flywaydb' '/opt/liquibase' '/opt/sonnar-scanner'
- USER 1001
- type: Dockerfile
- strategy:
- dockerStrategy:
- from:
- kind: ImageStreamTag
- name: ${SOURCE_IMAGE_STREAM_TAG}
- namespace: ${SOURCE_IMAGE_STREAM_NAMESPACE}
- type: Docker
- successfulBuildsHistoryLimit: 2
- triggers:
- - type: ConfigChange
- - imageChange:
- from:
- kind: ImageStreamTag
- name: ${SOURCE_IMAGE_STREAM_TAG}
- namespace: ${SOURCE_IMAGE_STREAM_NAMESPACE}
- type: ImageChange
-parameters:
- - description: A name used for all objects
- displayName: Name
- name: NAME
- required: true
- value: jenkins
- - description: A name suffix used for all objects
- displayName: Suffix
- name: SUFFIX
- required: false
- value: "-0"
- - description: A version used for the image tags
- displayName: version
- name: VERSION
- required: true
- value: latest
- - name: SLAVE_NAME
- value: build
- - name: SOURCE_IMAGE_STREAM_NAMESPACE
- value: ""
- - name: SOURCE_IMAGE_STREAM_TAG
- required: true
- value: "jenkins:build-v2.0-0"
- - name: SOURCE_GIT_URL
- required: true
- value: https://github.com/cvarjao-o/hello-world.git
- - name: SOURCE_GIT_REF
- required: true
- value: master
- - name: SOURCE_CONTEXT_DIR
- required: true
- value: .jenkins-v3/docker-slave
diff --git a/.jenkins-v3/openshift/deploy-master.yaml b/.jenkins-v3/openshift/deploy-master.yaml
deleted file mode 100644
index 6fc928356..000000000
--- a/.jenkins-v3/openshift/deploy-master.yaml
+++ /dev/null
@@ -1,297 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: jenkins
-objects:
-- apiVersion: v1
- kind: ImageStream
- metadata:
- creationTimestamp: null
- labels:
- shared: "true"
- name: ${NAME}
- spec:
- lookupPolicy:
- local: false
-- apiVersion: v1
- kind: Secret
- metadata:
- annotations:
- as-copy-of: template.${NAME}-slave-user
- as-copy-of/preserve: password
- name: ${NAME}${SUFFIX}-slave-user
- stringData:
- metadata.name: ${NAME}${SUFFIX}-slave-user
- password: ${SLAVE_USER_PASSWORD}
- username: jenkins-slave
- type: kubernetes.io/basic-auth
-- apiVersion: v1
- kind: Secret
- metadata:
- annotations:
- as-copy-of: template.${NAME}-github
- name: ${NAME}${SUFFIX}-github
- stringData:
- metadata.name: ${NAME}${SUFFIX}-github
- password: ${GITHUB_TOKEN}
- username: ${GITHUB_USERNAME}
- type: kubernetes.io/basic-auth
-- apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- annotations:
- volume.beta.kubernetes.io/storage-class: netapp-file-standard
- volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/glusterfs
- name: ${NAME}${SUFFIX}
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: 1Gi
-- apiVersion: v1
- kind: ServiceAccount
- metadata:
- annotations:
- serviceaccounts.openshift.io/oauth-redirectreference.jenkins: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"${NAME}${SUFFIX}"}}'
- name: ${NAME}${SUFFIX}
-- apiVersion: v1
- groupNames: null
- kind: RoleBinding
- metadata:
- name: ${NAME}${SUFFIX}_edit
- roleRef:
- name: edit
- subjects:
- - kind: ServiceAccount
- name: ${NAME}${SUFFIX}
-- apiVersion: v1
- groupNames: null
- kind: RoleBinding
- metadata:
- name: ${NAME}${SUFFIX}_admin
- roleRef:
- name: admin
- subjects:
- - kind: ServiceAccount
- name: ${NAME}${SUFFIX}
-- apiVersion: v1
- kind: DeploymentConfig
- metadata:
- annotations:
- template.alpha.openshift.io/wait-for-ready: "true"
- creationTimestamp: null
- name: ${NAME}${SUFFIX}
- spec:
- replicas: 1
- revisionHistoryLimit: 10
- selector:
- deploymentconfig: ${NAME}${SUFFIX}
- strategy:
- activeDeadlineSeconds: 21600
- recreateParams:
- timeoutSeconds: 600
- resources: {}
- type: Recreate
- template:
- metadata:
- creationTimestamp: null
- labels:
- deploymentconfig: ${NAME}${SUFFIX}
- spec:
- containers:
- - command:
- - /usr/local/bin/container-entrypoint
- - /usr/local/bin/jenkins-run
- env:
- - name: USE_JAVA_DIAGNOSTICS
- value: "true"
- - name: JENKINS_URL
- value: https://${ROUTE_HOST}${ROUTE_PATH}
- - name: ENV_NAME
- value: ${ENV_NAME}
- - name: ENV_ID
- value: ${ENV_ID}
- image: ' '
- imagePullPolicy: Always
- livenessProbe:
- failureThreshold: 3
- httpGet:
- path: /login
- port: 8080
- initialDelaySeconds: 420
- periodSeconds: 360
- timeoutSeconds: 240
- name: jenkins
- ports:
- - containerPort: 50000
- protocol: TCP
- - containerPort: 8080
- protocol: TCP
- readinessProbe:
- httpGet:
- path: /login
- port: 8080
- initialDelaySeconds: 3
- timeoutSeconds: 240
- resources:
- limits:
- cpu: "2"
- memory: 2Gi
- requests:
- cpu: 300m
- memory: 2Gi
- terminationMessagePath: /dev/termination-log
- terminationMessagePolicy: File
- volumeMounts:
- - mountPath: /var/lib/jenkins/jobs
- name: jenkins-jobs
- readOnly: false
- - mountPath: /var/run/pod
- name: pod-metadata
- - mountPath: /run/secrets/jenkins-slave-user
- name: jenkins-slave-user
- readOnly: true
- - mountPath: /run/secrets/github
- name: github
- readOnly: true
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- schedulerName: default-scheduler
- securityContext: {}
- serviceAccount: ${NAME}${SUFFIX}
- serviceAccountName: ${NAME}${SUFFIX}
- terminationGracePeriodSeconds: 30
- volumes:
- - name: jenkins-jobs
- persistentVolumeClaim:
- claimName: ${NAME}${SUFFIX}
- - downwardAPI:
- items:
- - fieldRef:
- fieldPath: metadata.name
- name: name
- path: name
- - fieldRef:
- fieldPath: metadata.namespace
- name: namespace
- path: namespace
- - fieldRef:
- fieldPath: metadata.labels
- name: labels
- path: labels
- - fieldRef:
- fieldPath: metadata.annotations
- name: annotations
- path: annotations
- name: pod-metadata
- - name: jenkins-slave-user
- secret:
- defaultMode: 420
- secretName: ${NAME}${SUFFIX}-slave-user
- - name: github
- secret:
- defaultMode: 420
- secretName: ${NAME}${SUFFIX}-github
- test: false
- triggers:
- - imageChangeParams:
- automatic: true
- containerNames:
- - jenkins
- from:
- kind: ImageStreamTag
- name: ${NAME}:${VERSION}
- type: ImageChange
- - type: ConfigChange
-- apiVersion: v1
- kind: Service
- metadata:
- creationTimestamp: null
- name: ${NAME}${SUFFIX}
- spec:
- ports:
- - name: 8080-tcp
- port: 8080
- protocol: TCP
- targetPort: 8080
- - name: 50000-tcp
- port: 50000
- protocol: TCP
- targetPort: 50000
- selector:
- deploymentconfig: ${NAME}${SUFFIX}
- sessionAffinity: None
- type: ClusterIP
-- apiVersion: v1
- kind: Route
- metadata:
- creationTimestamp: null
- name: ${NAME}${SUFFIX}
- spec:
- host: ${ROUTE_HOST}
- path: ${ROUTE_PATH}
- port:
- targetPort: 8080-tcp
- tls:
- termination: edge
- to:
- kind: Service
- name: ${NAME}${SUFFIX}
- weight: 100
- wildcardPolicy: None
-parameters:
-- description: A name used for all objects
- displayName: Name
- name: NAME
- required: true
-- description: A name suffix used for all objects
- displayName: Suffix
- name: SUFFIX
- required: false
- value: ""
-- description: A version used for the image tags
- displayName: version
- name: VERSION
- required: true
- value: v1.0.0
-- description: GITHUB_USERNAME
- displayName: GITHUB_USERNAME
- name: GITHUB_USERNAME
- required: true
- value: cvarjao
-- description: ROUTE_HOST
- displayName: ROUTE_HOST
- name: ROUTE_HOST
- required: true
-- description: ROUTE_PATH
- displayName: ROUTE_PATH
- name: ROUTE_PATH
- required: true
- value: /
-- description: Environment Name
- displayName: ENV_NAME
- name: ENV_NAME
- required: true
- value: prod
-- description: Environment ID
- displayName: ENV_ID
- name: ENV_ID
- required: true
- value: prod
-- description: SLAVE_USER_PASSWORD
- displayName: SLAVE_USER_PASSWORD
- from: '[a-zA-Z0-9]{16}'
- generate: expression
- name: SLAVE_USER_PASSWORD
-- description: GITHUB_USERNAME
- displayName: GITHUB_USERNAME
- name: GITHUB_USERNAME
- required: false
- value: ""
-- description: GitHub Personal Access Token
- displayName: GITHUB_USERNAME
- name: GITHUB_TOKEN
- required: false
- value: ""
diff --git a/.jenkins-v3/openshift/deploy-prereq.yaml b/.jenkins-v3/openshift/deploy-prereq.yaml
deleted file mode 100644
index 8ac749f4e..000000000
--- a/.jenkins-v3/openshift/deploy-prereq.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-objects:
-- apiVersion: v1
- kind: Secret
- metadata:
- annotations: null
- name: template.${NAME}-slave-user
- stringData:
- metadata.name: template.${NAME}-slave-user
- username: jenkins-slave
-- apiVersion: v1
- kind: Secret
- metadata:
- annotations: null
- name: template.${NAME}-github
- stringData:
- metadata.name: template.${NAME}-github
- username: ${GH_USERNAME}
- password: ${GH_ACCESS_TOKEN}
-parameters:
-- description: A name used for all objects
- displayName: Name
- name: NAME
- required: true
- value: jenkins
-- name: GH_USERNAME
- required: true
-- description: GitHub Personal Access Token
- name: GH_ACCESS_TOKEN
- required: true
diff --git a/.jenkins-v3/openshift/deploy-slave.yaml b/.jenkins-v3/openshift/deploy-slave.yaml
deleted file mode 100644
index 6f992b259..000000000
--- a/.jenkins-v3/openshift/deploy-slave.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: jenkins
-objects:
-- apiVersion: v1
- kind: DeploymentConfig
- metadata:
- creationTimestamp: null
- name: ${NAME}-${SLAVE_NAME}${SUFFIX}
- spec:
- replicas: "${{REPLICAS}}"
- revisionHistoryLimit: 10
- selector:
- deploymentconfig: ${NAME}-${SLAVE_NAME}${SUFFIX}
- strategy:
- activeDeadlineSeconds: 21600
- recreateParams:
- timeoutSeconds: 600
- resources: {}
- type: Recreate
- template:
- metadata:
- creationTimestamp: null
- labels:
- deploymentconfig: ${NAME}-${SLAVE_NAME}${SUFFIX}
- spec:
- initContainers:
- - name: init
- image: " "
- command:
- - "curl"
- - "-sSf"
- - "http://${NAME}${SUFFIX}:8080/login"
- containers:
- - command:
- - bash
- - -c
- - cd $HOME && java -XshowSettings:vm -version && exec java -jar /usr/lib/jenkins/swarm-client.jar
- -name "$(cat /etc/hostname)" -deleteExistingClients -fsroot "$JENKINS_HOME/$(cat
- /etc/hostname)" -master http://$JENKINS_MASTER_SERVICE:8080 -disableSslVerification
- -username "$(cat /var/run/secrets/jenkins-slave-user/username)" -passwordFile
- /var/run/secrets/jenkins-slave-user/password -description "$(cat /etc/hostname)"
- -executors ${SLAVE_EXECUTORS} -labels '${SLAVE_LABELS}' -mode
- 'normal' -retry 10 -tunnel $JENKINS_MASTER_SERVICE:50000 -disableClientsUniqueId
- env:
- - name: JENKINS_MASTER_SERVICE
- value: ${NAME}${SUFFIX}
- - name: JAVA_TOOL_OPTIONS
- value: -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap
- -XX:MaxRAMFraction=5 -XX:MaxHeapFreeRatio=20 -XX:MinHeapFreeRatio=10
- -XX:+UseParallelGC -XX:ParallelGCThreads=2
- - name: ENV_NAME
- value: ${ENV_NAME}
- - name: ENV_ID
- value: ${ENV_ID}
- image: ' '
- imagePullPolicy: Always
- name: jenkins
- ports:
- - containerPort: 50000
- protocol: TCP
- - containerPort: 8080
- protocol: TCP
- resources:
- limits:
- cpu: ${CPU_LIMIT}
- memory: ${MEMORY_LIMIT}
- requests:
- cpu: ${CPU_REQUEST}
- memory: ${MEMORY_REQUEST}
- terminationMessagePath: /dev/termination-log
- terminationMessagePolicy: File
- volumeMounts:
- - mountPath: /var/run/pod
- name: pod-metadata
- - mountPath: /run/secrets/jenkins-slave-user
- name: jenkins-slave-user
- readOnly: true
- - mountPath: /run/secrets/github
- name: github
- readOnly: true
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- schedulerName: default-scheduler
- securityContext: {}
- serviceAccount: ${NAME}${SUFFIX}
- serviceAccountName: ${NAME}${SUFFIX}
- terminationGracePeriodSeconds: 30
- volumes:
- - name: jenkins-home
- persistentVolumeClaim:
- claimName: ${NAME}${SUFFIX}
- - downwardAPI:
- items:
- - fieldRef:
- fieldPath: metadata.name
- name: name
- path: name
- - fieldRef:
- fieldPath: metadata.namespace
- name: namespace
- path: namespace
- - fieldRef:
- fieldPath: metadata.labels
- name: labels
- path: labels
- - fieldRef:
- fieldPath: metadata.annotations
- name: annotations
- path: annotations
- name: pod-metadata
- - name: jenkins-slave-user
- secret:
- defaultMode: 420
- secretName: ${NAME}${SUFFIX}-slave-user
- - name: github
- secret:
- defaultMode: 420
- secretName: ${NAME}${SUFFIX}-github
- test: false
- triggers:
- - imageChangeParams:
- automatic: true
- containerNames:
- - jenkins
- - init
- from:
- kind: ImageStreamTag
- name: ${NAME}:${VERSION}
- type: ImageChange
- - type: ConfigChange
-parameters:
-- description: A name used for all objects
- displayName: Name
- name: NAME
- required: true
-- description: A name suffix used for all objects
- displayName: Suffix
- name: SUFFIX
- required: false
- value: ""
-- description: A version used for the image tags
- displayName: version
- name: VERSION
- required: true
- value: v1.0.0
-- name: SLAVE_NAME
- required: true
-- name: SLAVE_LABELS
- value: "Linux rhel rhel7 build test deploy light"
-- name: SLAVE_EXECUTORS
- value: "3"
-- name: REPLICAS
- value: "1"
-- name: CPU_REQUEST
- value: "300m"
-- name: CPU_LIMIT
- value: "500m"
-- name: MEMORY_REQUEST
- value: "1Gi"
-- name: MEMORY_LIMIT
- value: "1Gi"
\ No newline at end of file
diff --git a/.jenkins-v3/openshift/secrets.json b/.jenkins-v3/openshift/secrets.json
deleted file mode 100644
index 1add567d0..000000000
--- a/.jenkins-v3/openshift/secrets.json
+++ /dev/null
@@ -1,50 +0,0 @@
-{
- "kind": "Template",
- "apiVersion": "v1",
- "metadata": {
- "name": "jenkins",
- "creationTimestamp": null
- },
- "parameters":[
- {
- "name": "NAME",
- "displayName": "Name",
- "description": "A name used for all objects",
- "required": true,
- "value": "jenkins"
- },{
- "name": "GH_USERNAME",
- "required": true
- },{
- "name": "GH_PASSWORD",
- "description": "GitHub Personal Access Token",
- "required": true
- }
- ],
- "objects": [
- {
- "apiVersion": "v1",
- "kind": "Secret",
- "metadata": {
- "annotations": null,
- "name": "template.${NAME}-slave-user"
- },
- "stringData": {
- "metadata.name": "template.${NAME}-slave-user",
- "username": "jenkins-slave"
- }
- },{
- "apiVersion": "v1",
- "kind": "Secret",
- "metadata": {
- "annotations": null,
- "name": "template.${NAME}-github"
- },
- "stringData": {
- "metadata.name": "template.${NAME}-github",
- "username": "${GH_USERNAME}",
- "password": "${GH_PASSWORD}"
- }
- }
- ]
-}
\ No newline at end of file
diff --git a/.pipeline-v3/.nvmrc b/.pipeline-v3/.nvmrc
deleted file mode 100644
index 6b12bc745..000000000
--- a/.pipeline-v3/.nvmrc
+++ /dev/null
@@ -1 +0,0 @@
-v10.15.2
\ No newline at end of file
diff --git a/.pipeline-v3/build.js b/.pipeline-v3/build.js
deleted file mode 100755
index 3ac899f86..000000000
--- a/.pipeline-v3/build.js
+++ /dev/null
@@ -1,5 +0,0 @@
-'use strict';
-const task = require('./lib/build.js')
-const settings = require('./lib/config.js')
-
-task(Object.assign(settings, { phase: 'build'}))
diff --git a/.pipeline-v3/clean.js b/.pipeline-v3/clean.js
deleted file mode 100755
index 42231d7ff..000000000
--- a/.pipeline-v3/clean.js
+++ /dev/null
@@ -1,5 +0,0 @@
-'use strict';
-const settings = require('./lib/config.js')
-const task = require('./lib/clean.js')
-
-task(Object.assign(settings, { phase: settings.options.env}));
diff --git a/.pipeline-v3/deploy-unittest.js b/.pipeline-v3/deploy-unittest.js
deleted file mode 100644
index c6c95dedb..000000000
--- a/.pipeline-v3/deploy-unittest.js
+++ /dev/null
@@ -1,5 +0,0 @@
-'use strict';
-const settings = require('./lib/config.js')
-const task = require('./lib/deploy-unittest.js')
-
-task(Object.assign(settings, { phase: settings.options.env}));
diff --git a/.pipeline-v3/deploy.js b/.pipeline-v3/deploy.js
deleted file mode 100755
index 5e34c9484..000000000
--- a/.pipeline-v3/deploy.js
+++ /dev/null
@@ -1,7 +0,0 @@
-'use strict';
-const settings = require('./lib/config.js')
-const task = require('./lib/deploy.js')
-
-task(Object.assign(settings, { phase: settings.options.env}));
-
-console.log('real end of deploy')
\ No newline at end of file
diff --git a/.pipeline-v3/lib/build.js b/.pipeline-v3/lib/build.js
deleted file mode 100755
index 7af84d2b5..000000000
--- a/.pipeline-v3/lib/build.js
+++ /dev/null
@@ -1,57 +0,0 @@
-"use strict";
-const { OpenShiftClientX } = require("@bcgov/pipeline-cli");
-const path = require("path");
-
-module.exports = settings => {
- const phases = settings.phases;
- const options = settings.options;
- const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options));
- const phase = "build";
- let objects = [];
- const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift-v3"));
-
- // The building of your cool app goes here ▼▼▼
- //build envoy
- /*
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/envoy/envoy-bc.yaml`, {
- 'param':{
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'GIT_URL': oc.git.http_url,
- 'GIT_REF': oc.git.ref
- }
- }))
- */
-
- // build frontend
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-bc.yaml`, {
- 'param':{
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'GIT_URL': oc.git.http_url,
- 'GIT_REF': oc.git.ref
- }
- }))
-
- //build backend
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/backend/backend-bc.yaml`, {
- 'param':{
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'GIT_URL': oc.git.http_url,
- 'GIT_REF': oc.git.ref
- }
- }))
-
- oc.applyRecommendedLabels(
- objects,
- phases[phase].name,
- phase,
- phases[phase].changeId,
- phases[phase].instance,
- );
- oc.applyAndBuild(objects);
-};
diff --git a/.pipeline-v3/lib/clean.js b/.pipeline-v3/lib/clean.js
deleted file mode 100755
index 0c5ae9aea..000000000
--- a/.pipeline-v3/lib/clean.js
+++ /dev/null
@@ -1,134 +0,0 @@
-"use strict";
-const { OpenShiftClientX } = require("@bcgov/pipeline-cli");
-const KeyCloakClient = require('./keycloak');
-
-const getTargetPhases = (env, phases) => {
- let target_phase = [];
- for (const phase in phases) {
- if (env.match(/^(all|transient)$/) && phases[phase].transient) {
- target_phase.push(phase);
- } else if (env === phase) {
- target_phase.push(phase);
- break;
- }
- }
-
- return target_phase;
-};
-
-module.exports = settings => {
- const phases = settings.phases;
- const options = settings.options;
- const oc = new OpenShiftClientX(Object.assign({ namespace: phases.build.namespace }, options));
- const target_phases = getTargetPhases(options.env, phases);
-
- target_phases.forEach(k => {
- if (phases.hasOwnProperty(k)) {
-
- const phase = phases[k];
- oc.namespace(phase.namespace);
-
- if(k === 'dev') {
- const kc = new KeyCloakClient(settings, oc);
- kc.removeUris();
- }
-
- let buildConfigs = oc.get("bc", {
- selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`,
- namespace: phase.namespace,
- });
- buildConfigs.forEach(bc => {
- if (bc.spec.output.to.kind == "ImageStreamTag") {
- oc.delete([`ImageStreamTag/${bc.spec.output.to.name}`], {
- "ignore-not-found": "true",
- wait: "true",
- namespace: phase.namespace,
- });
- }
- });
-
- let deploymentConfigs = oc.get("dc", {
- selector: `app=${phase.instance},env-id=${phase.changeId},env-name=${k},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`,
- namespace: phase.namespace,
- });
- deploymentConfigs.forEach(dc => {
- dc.spec.triggers.forEach(trigger => {
- if (
- trigger.type == "ImageChange" &&
- trigger.imageChangeParams.from.kind == "ImageStreamTag"
- ) {
- oc.delete([`ImageStreamTag/${trigger.imageChangeParams.from.name}`], {
- "ignore-not-found": "true",
- wait: "true",
- namespace: phase.namespace,
- });
- }
- });
- });
-
- //get all statefulsets before they are deleted
- const statefulsets = oc.get("statefulset", {
- selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`,
- namespace: phase.namespace,
- });
-
- oc.raw("delete", ["all"], {
- selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`,
- wait: "true",
- namespace: phase.namespace,
- });
- oc.raw(
- "delete",
- ["pvc,Secret,configmap,endpoints,RoleBinding,role,ServiceAccount,Endpoints"],
- {
- selector: `app=${phase.instance},env-id=${phase.changeId},!shared,github-repo=${oc.git.repository},github-owner=${oc.git.owner}`,
- wait: "true",
- namespace: phase.namespace,
- },
- );
-
- //remove all the PVCs associated with each statefulset after they are deleted by the "delete all" operation above
- statefulsets.forEach(statefulset => {
- //delete PVCs mounted for the statefulset
- oc.raw("delete", ["pvc"], {
- selector: `statefulset=${statefulset.metadata.name}`,
- "ignore-not-found": "true",
- wait: "true",
- namespace: phase.namespace,
- });
- /***
- //delete PVCs mounted in the statefulset
- let statefulsetPVCs = oc.get("pvc", {
- selector: `statefulset=${statefulset.metadata.name}`,
- namespace: phase.namespace,
- });
- statefulsetPVCs.forEach(statefulsetPVC => {
- oc.delete([`pvc/${statefulsetPVC.metadata.name}`], {
- "ignore-not-found": "true",
- wait: "true",
- namespace: phase.namespace,
- });
- })
- ****/
- //delete configmaps created by patroni
- let patroniConfigmaps = oc.get("configmap", {
- selector: `app.kubernetes.io/name=patroni,cluster-name=${statefulset.metadata.name}`,
- namespace: phase.namespace,
- });
- if(Object.entries(patroniConfigmaps).length > 0) {
- oc.raw(
- "delete",
- ["configmap"],
- {
- selector: `app.kubernetes.io/name=patroni,cluster-name=${statefulset.metadata.name}`,
- wait: "true",
- "ignore-not-found": "true",
- namespace: phase.namespace,
- },
- );
- }
- });
-
- }
- });
-};
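The whole cleanup keys off the recommended labels applied at deploy time, so everything belonging to one PR environment shares the same base selector. A worked example of the selector clean.js builds, with the values config.js (next in this patch) would derive for this patch's own PR, --pr=488:

    // The base label selector clean.js constructs for one PR environment.
    const phase = { instance: 'zeva-dev-488', changeId: '488' };
    const git = { repository: 'zeva', owner: 'bcgov' };
    const selector =
      `app=${phase.instance},env-id=${phase.changeId},!shared,` +
      `github-repo=${git.repository},github-owner=${git.owner}`;
    console.log(selector);
    // => app=zeva-dev-488,env-id=488,!shared,github-repo=zeva,github-owner=bcgov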
diff --git a/.pipeline-v3/lib/config.js b/.pipeline-v3/lib/config.js
deleted file mode 100644
index 633e58334..000000000
--- a/.pipeline-v3/lib/config.js
+++ /dev/null
@@ -1,55 +0,0 @@
-'use strict';
-const options= require('@bcgov/pipeline-cli').Util.parseArguments()
-const changeId = options.pr //aka pull-request
-const version = '1.0.0'
-const name = 'zeva'
-const ocpName = 'pathfinder'
-
-const phases = {
-
- build: {namespace:'tbiwaq-tools' , transient:true, name: `${name}`, phase: 'build',
- changeId:`${changeId}`, suffix: `-build-${changeId}` , instance: `${name}-build-${changeId}`,
- version:`${version}-${changeId}`, tag:`build-${version}-${changeId}`, ocpName: `${ocpName}`},
-
- dev: {namespace:'tbiwaq-dev', transient:true, name: `${name}`, ssoSuffix:'-dev',
- ssoName:'dev.oidc.gov.bc.ca', phase: 'dev' , changeId:`${changeId}`, suffix: `-dev-${changeId}`,
- instance: `${name}-dev-${changeId}` , version:`${version}-${changeId}`, tag:`dev-${version}-${changeId}`,
- host: `zeva-dev-${changeId}.${ocpName}.gov.bc.ca`, djangoDebug: 'True', logoutHost: 'logontest.gov.bc.ca',
- frontendCpuRequest: '100m', frontendCpuLimit: '700m', frontendMemoryRequest: '300M', frontendMemoryLimit: '4G', frontendReplicas: 1, creditTransferEnabled: 'true',
- backendCpuRequest: '300m', backendCpuLimit: '600m', backendMemoryRequest: '1G', backendMemoryLimit: '2G', backendHealthCheckDelay: 30, backendHost: `zeva-backend-dev-${changeId}.${ocpName}.gov.bc.ca`, backendReplicas: 1,
- minioCpuRequest: '100m', minioCpuLimit: '200m', minioMemoryRequest: '200M', minioMemoryLimit: '500M', minioPvcSize: '1G',
- schemaspyCpuRequest: '50m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160,
- rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '1G', rabbitmqReplica: 1, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard',
- patroniCpuRequest: '200m', patroniCpuLimit: '400m', patroniMemoryRequest: '250M', patroniMemoryLimit: '500M', patroniPvcSize: '2G', patroniReplica: 1, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`},
-
- test: {namespace:'tbiwaq-test', name: `${name}`, ssoSuffix:'-test',
- ssoName:'test.oidc.gov.bc.ca', phase: 'test' , changeId:`${changeId}`, suffix: `-test`,
- instance: `${name}-test`, version:`${version}`, tag:`test-${version}`,
- host: `zeva-test.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHost: 'logontest.gov.bc.ca',
- frontendCpuRequest: '300m', frontendCpuLimit: '600m', frontendMemoryRequest: '500M', frontendMemoryLimit: '1G', frontendReplicas: 2, frontendMinReplicas: 2, frontendMaxReplicas: 5, creditTransferEnabled: 'true',
- backendCpuRequest: '100m', backendCpuLimit: '500m', backendMemoryRequest: '500M', backendMemoryLimit: '2G', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 2, backendMaxReplicas: 5, backendHost: `zeva-backend-test.${ocpName}.gov.bc.ca`,
- minioCpuRequest: '100m', minioCpuLimit: '300m', minioMemoryRequest: '500M', minioMemoryLimit: '700M', minioPvcSize: '5G',
- schemaspyCpuRequest: '20m', schemaspyCpuLimit: '200m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160,
- rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '700M', rabbitmqPvcSize: '1G', rabbitmqReplica: 2, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard',
- patroniCpuRequest: '500m', patroniCpuLimit: '1000m', patroniMemoryRequest: '500M', patroniMemoryLimit: '1G', patroniPvcSize: '5G', patroniReplica: 2, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`},
-
- prod: {namespace:'tbiwaq-prod', name: `${name}`, ssoSuffix:'',
- ssoName:'oidc.gov.bc.ca', phase: 'prod' , changeId:`${changeId}`, suffix: `-prod`,
- instance: `${name}-prod`, version:`${version}`, tag:`prod-${version}`,
- host: `zeroemissionvehicles.${ocpName}.gov.bc.ca`, djangoDebug: 'False', logoutHost: 'logon7.gov.bc.ca',
- frontendCpuRequest: '300m', frontendCpuLimit: '600m', frontendMemoryRequest: '1G', frontendMemoryLimit: '2G', frontendReplicas: 2, frontendMinReplicas: 2, frontendMaxReplicas: 5, creditTransferEnabled: 'false',
- backendCpuRequest: '200m', backendCpuLimit: '700m', backendMemoryRequest: '1G', backendMemoryLimit: '2G', backendHealthCheckDelay: 30, backendReplicas: 1, backendMinReplicas: 2, backendMaxReplicas: 5, backendHost: `zeva-backend-prod.${ocpName}.gov.bc.ca`,
- minioCpuRequest: '100m', minioCpuLimit: '300m', minioMemoryRequest: '500M', minioMemoryLimit: '700M', minioPvcSize: '10G',
- schemaspyCpuRequest: '50m', schemaspyCpuLimit: '400m', schemaspyMemoryRequest: '150M', schemaspyMemoryLimit: '300M', schemaspyHealthCheckDelay: 160,
- rabbitmqCpuRequest: '250m', rabbitmqCpuLimit: '700m', rabbitmqMemoryRequest: '500M', rabbitmqMemoryLimit: '1G', rabbitmqPvcSize: '5G', rabbitmqReplica: 3, rabbitmqPostStartSleep: 120, storageClass: 'netapp-block-standard',
- patroniCpuRequest: '500m', patroniCpuLimit: '1000m', patroniMemoryRequest: '1G', patroniMemoryLimit: '2G', patroniPvcSize: '40G', patroniReplica: 3, storageClass: 'netapp-block-standard', ocpName: `${ocpName}`},
-
-};
-
- // This callback forces the node process to exit with a failure status.
-process.on('unhandledRejection', (reason) => {
- console.log(reason);
- process.exit(1);
-});
-
-module.exports = exports = {phases, options};
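Every derived name in config.js flows from the PR number passed as --pr. A worked example for the dev phase, using this patch's own PR number (488):

    // Worked example, assuming the pipeline is invoked with --pr=488.
    const changeId = 488, version = '1.0.0', name = 'zeva', ocpName = 'pathfinder';
    const dev = {
      suffix: `-dev-${changeId}`,           // "-dev-488"
      instance: `${name}-dev-${changeId}`,  // "zeva-dev-488"
      tag: `dev-${version}-${changeId}`,    // "dev-1.0.0-488"
      host: `zeva-dev-${changeId}.${ocpName}.gov.bc.ca`, // "zeva-dev-488.pathfinder.gov.bc.ca"
    };
    console.log(dev);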
diff --git a/.pipeline-v3/lib/deploy-unittest.js b/.pipeline-v3/lib/deploy-unittest.js
deleted file mode 100644
index 83137acea..000000000
--- a/.pipeline-v3/lib/deploy-unittest.js
+++ /dev/null
@@ -1,67 +0,0 @@
-"use strict";
-const { OpenShiftClientX } = require("@bcgov/pipeline-cli");
-const path = require("path");
-
-module.exports = settings => {
- const phases = settings.phases;
- const options = settings.options;
- const phase = options.env;
- const changeId = phases[phase].changeId;
- const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options));
-
- const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift-v3"));
- var objects = [];
-
- // The deployment of your cool app goes here ▼▼▼
-
- //deploy separate database and backend pod for unit test
- if( phase === 'dev' ) {
-
- //create unit test database init scripts
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/unittest/zeva-postgresql-init.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix
- }
- }))
-
- //deploy postgresql unit test
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/unittest/postgresql-dc-unittest.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'ENV_NAME': phases[phase].phase
- }
- }))
-
- //deploy backend unit test
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/unittest/backend-dc-unittest.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'ENV_NAME': phases[phase].phase,
- 'BACKEND_HOST_NAME': phases[phase].backendHost,
- 'RABBITMQ_CLUSTER_NAME': 'rabbitmq-cluster',
- 'CPU_REQUEST': phases[phase].backendCpuRequest,
- 'CPU_LIMIT': '700m',
- 'MEMORY_REQUEST': phases[phase].backendMemoryRequest,
- 'MEMORY_LIMIT': phases[phase].backendMemoryLimit,
- 'HEALTH_CHECK_DELAY': phases[phase].backendHealthCheckDelay,
- 'REPLICAS': phases[phase].backendReplicas
- }
- }))
-
- }
-
- oc.applyRecommendedLabels(
- objects,
- phases[phase].name,
- phase,
- `${changeId}`,
- phases[phase].instance,
- );
- oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag);
- oc.applyAndDeploy(objects, phases[phase].instance);
-
-};
diff --git a/.pipeline-v3/lib/deploy.js b/.pipeline-v3/lib/deploy.js
deleted file mode 100755
index 6209d0c8d..000000000
--- a/.pipeline-v3/lib/deploy.js
+++ /dev/null
@@ -1,210 +0,0 @@
-"use strict";
-const { OpenShiftClientX } = require("@bcgov/pipeline-cli");
-const path = require("path");
-const KeyCloakClient = require('./keycloak');
-
-module.exports = settings => {
- const phases = settings.phases;
- const options = settings.options;
- const phase = options.env;
- const changeId = phases[phase].changeId;
- const oc = new OpenShiftClientX(Object.assign({namespace: phases[phase].namespace}, options));
-
- //add Valid Redirect URIs for the pull request to keycloak
- if(phase === 'dev') {
- const kc = new KeyCloakClient(settings, oc);
- kc.addUris();
- }
-
- const templatesLocalBaseUrl = oc.toFileUrl(path.resolve(__dirname, "../../openshift-v3"));
- var objects = [];
-
- // The deployment of your cool app goes here ▼▼▼
-
- // create configs
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/config/configmap.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'ENV_NAME': phases[phase].phase,
- 'HOST_NAME': phases[phase].host,
- 'BACKEND_HOST_NAME': phases[phase].backendHost,
- 'SSO_NAME': phases[phase].ssoName,
- 'KEYCLOAK_REALM': 'rzh2zkjq',
- 'DJANGO_DEBUG': phases[phase].djangoDebug,
- 'OCP_NAME': phases[phase].ocpName,
- 'LOGOUT_HOST': phases[phase].logoutHost
- }
- }))
-
- /*** minio deployment removed from the PR pipeline; one pre-deployed minio will serve all PRs.
- * minio configuration stays in config.js unchanged
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/minio/minio-dc.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'ENV_NAME': phases[phase].phase,
- 'PVC_SIZE': phases[phase].minioPvcSize,
- 'CPU_REQUEST': phases[phase].minioCpuRequest,
- 'CPU_LIMIT': phases[phase].minioCpuLimit,
- 'MEMORY_REQUEST': phases[phase].minioMemoryRequest,
- 'MEMORY_LIMIT': phases[phase].minioMemoryRequest
- }
- }))
- */
-
- //deploy Patroni required secrets
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/patroni/deployment-prereq.yaml`, {
- 'param': {
- 'NAME': 'patroni',
- 'SUFFIX': phases[phase].suffix
- }
- }))
- //deploy Patroni
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/patroni/deployment.yaml`, {
- 'param': {
- 'NAME': 'patroni',
- 'ENV_NAME': phases[phase].phase,
- 'SUFFIX': phases[phase].suffix,
- 'CPU_REQUEST': phases[phase].patroniCpuRequest,
- 'CPU_LIMIT': phases[phase].patroniCpuLimit,
- 'MEMORY_REQUEST': phases[phase].patroniMemoryRequest,
- 'MEMORY_LIMIT': phases[phase].patroniMemoryLimit,
- 'IMAGE_REGISTRY': 'docker-registry.default.svc:5000',
- 'IMAGE_STREAM_NAMESPACE': phases[phase].namespace,
- 'IMAGE_STREAM_TAG': 'patroni:v10-stable',
- 'REPLICA': phases[phase].patroniReplica,
- 'PVC_SIZE': phases[phase].patroniPvcSize,
- 'STORAGE_CLASS': phases[phase].storageClass
- }
- }))
-
- //only deploy the rabbitmq secret and configmap; rabbitmq is not in use yet (2020-09-21)
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/rabbitmq/rabbitmq-secret-configmap-only.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'ENV_NAME': phases[phase].phase,
- 'SUFFIX': phases[phase].suffix,
- 'NAMESPACE': phases[phase].namespace,
- 'CLUSTER_NAME': 'rabbitmq-cluster'
- }
- }))
-
- /**
- //deploy rabbitmq, use docker image directly
- //POST_START_SLEEP is hard-coded in the rabbitmq template; parameter replacement was not successful
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/rabbitmq/rabbitmq-cluster-dc.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'ENV_NAME': phases[phase].phase,
- 'SUFFIX': phases[phase].suffix,
- 'NAMESPACE': phases[phase].namespace,
- 'CLUSTER_NAME': 'rabbitmq-cluster',
- 'ISTAG': `docker-registry.default.svc:5000/${phases[phase].namespace}/rabbitmq:3.8.3-management`,
- 'SERVICE_ACCOUNT': 'rabbitmq-discovery',
- 'VOLUME_SIZE': phases[phase].rabbitmqPvcSize,
- 'CPU_REQUEST': phases[phase].rabbitmqCpuRequest,
- 'CPU_LIMIT': phases[phase].rabbitmqCpuLimit,
- 'MEMORY_REQUEST': phases[phase].rabbitmqMemoryRequest,
- 'MEMORY_LIMIT': phases[phase].rabbitmqMemoryLimit,
- 'REPLICA': phases[phase].rabbitmqReplica,
- 'POST_START_SLEEP': phases[phase].rabbitmqPostStartSleep,
- 'STORAGE_CLASS': phases[phase].storageClass
- }
- }))
- */
-
- // deploy frontend configmap
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-configmap.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'CREDIT_TRANSFER_ENABLED': phases[phase].creditTransferEnabled
- }
- }))
-
- // deploy frontend
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-dc.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'ENV_NAME': phases[phase].phase,
- 'HOST_NAME': phases[phase].host,
- 'CPU_REQUEST': phases[phase].frontendCpuRequest,
- 'CPU_LIMIT': phases[phase].frontendCpuLimit,
- 'MEMORY_REQUEST': phases[phase].frontendMemoryRequest,
- 'MEMORY_LIMIT': phases[phase].frontendMemoryLimit,
- 'REPLICAS': phases[phase].frontendReplicas
- }
- }))
-
- //deploy backend
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/backend/backend-dc.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'ENV_NAME': phases[phase].phase,
- 'BACKEND_HOST_NAME': phases[phase].backendHost,
- 'RABBITMQ_CLUSTER_NAME': 'rabbitmq-cluster',
- 'CPU_REQUEST': phases[phase].backendCpuRequest,
- 'CPU_LIMIT': phases[phase].backendCpuLimit,
- 'MEMORY_REQUEST': phases[phase].backendMemoryRequest,
- 'MEMORY_LIMIT': phases[phase].backendMemoryLimit,
- 'HEALTH_CHECK_DELAY': phases[phase].backendHealthCheckDelay,
- 'REPLICAS': phases[phase].backendReplicas
- }
- }))
-
- //deploy schemaspy
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/schemaspy/schemaspy-dc.yaml`, {
- 'param': {
- 'SUFFIX': phases[phase].suffix,
- 'ENV_NAME': phases[phase].phase,
- 'CPU_REQUEST': phases[phase].schemaspyCpuRequest,
- 'CPU_LIMIT': phases[phase].schemaspyCpuLimit,
- 'MEMORY_REQUEST': phases[phase].schemaspyMemoryRequest,
- 'MEMORY_LIMIT': phases[phase].schemaspyMemoryLimit,
- 'HEALTH_CHECK_DELAY': phases[phase].schemaspyHealthCheckDelay,
- 'OCP_NAME': phases[phase].ocpName
- }
- }))
-
- //add autoscaler
- /*****
- if(phase === 'test' || phase === 'prod') {
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-autoscaler.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'MIN_REPLICAS': phases[phase].frontendMinReplicas,
- 'MAX_REPLICAS': phases[phase].frontendMaxReplicas
- }
- }))
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/backend/backend-autoscaler.yaml`, {
- 'param': {
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'MIN_REPLICAS': phases[phase].backendMinReplicas,
- 'MAX_REPLICAS': phases[phase].backendMaxReplicas
- }
- }))
- }
- ********/
-
- console.log('Start to deploy ..')
-
- oc.applyRecommendedLabels(
- objects,
- phases[phase].name,
- phase,
- `${changeId}`,
- phases[phase].instance,
- );
- oc.importImageStreams(objects, phases[phase].tag, phases.build.namespace, phases.build.tag);
- oc.applyAndDeploy(objects, phases[phase].instance);
-
- console.log('End of deploy ..')
-
-};
diff --git a/.pipeline-v3/lib/keycloak.js b/.pipeline-v3/lib/keycloak.js
deleted file mode 100644
index 5310a10b6..000000000
--- a/.pipeline-v3/lib/keycloak.js
+++ /dev/null
@@ -1,137 +0,0 @@
-"use strict";
-const axios = require("axios");
-const _ = require("lodash");
-//code reference https://github.com/bcgov/HMCR/blob/0.7/.pipeline/lib/keycloak.js
-module.exports = class KeyCloakClient {
- constructor(settings, oc) {
- this.phases = settings.phases;
- this.options = settings.options;
- this.oc = oc;
- this.zevaHost = this.phases.dev.host;
- }
-
- async init() {
-
- this.getSecrets();
-
- this.apiTokenPath = `/auth/realms/${this.realmId}/protocol/openid-connect/token`;
- this.zevaPublicClientPath = `auth/admin/realms/${this.realmId}/clients/${this.zevaClientId}`;
-
- this.api = axios.create({
- baseURL: `https://${this.ssoHost}`
- });
-
- const token = await this.getAccessToken();
-
- this.api.defaults.headers.common = {
- Authorization: `Bearer ${token}`
- };
- }
-
- getSecrets() {
- const keycloakSecret = this.oc.raw("get", [
- "secret",
- "zeva-keycloak",
- "-o",
- "json"
- ]);
- const secret = JSON.parse(keycloakSecret.stdout).data;
-
- this.clientId = Buffer.from(secret.clientId, "base64").toString();
- this.clientSecret = Buffer.from(secret.clientSecret, "base64").toString();
- this.zevaClientId = Buffer.from(secret.zevaPublic, "base64").toString();
- this.realmId = Buffer.from(secret.realmId, "base64").toString();
- this.ssoHost = Buffer.from(secret.host, "base64").toString();
-
- if (!this.clientId || !this.clientSecret || !this.zevaClientId)
- throw new Error(
- "Unable to retrieve Keycloak service account info from OpenShift"
- );
- }
-
- getAccessToken() {
-
- return this.api
- .post(this.apiTokenPath, "grant_type=client_credentials", {
- headers: { "Content-Type": "application/x-www-form-urlencoded" },
- auth: {
- username: this.clientId,
- password: this.clientSecret
- }
- })
- .then(function(response) {
- if (!response.data.access_token)
- throw new Error(
- "Unable to retrieve Keycloak service account access token"
- );
-
- return Promise.resolve(response.data.access_token);
- });
- }
-
- async getUris() {
-
- const response = await this.api.get(this.zevaPublicClientPath);
-
- const data = { ...response.data };
- const redirectUris = data.redirectUris;
-
- return { data, redirectUris };
- }
-
- async addUris() {
- await this.init();
-
- console.log("Attempting to add RedirectUri and WebOrigins");
-
- const { data, redirectUris} = await this.getUris();
-
- const putData = { id: data.id, clientId: data.clientId };
-
- const hasRedirectUris = redirectUris.find(item =>
- item.includes(this.zevaHost)
- );
-
- if (!hasRedirectUris) {
- redirectUris.push(`https://${this.zevaHost}/*`);
- putData.redirectUris = redirectUris;
- }
-
- if (!hasRedirectUris) {
- this.api
- .put(this.zevaPublicClientPath, putData)
- .then(() => console.log("RedirectUri and WebOrigins added."));
- } else {
- console.log("RedirectUri and WebOrigins add skipped.");
- }
- }
-
- async removeUris() {
- await this.init();
-
- console.log("Attempting to remove RedirectUri and WebOrigins");
-
- const { data, redirectUris } = await this.getUris();
-
- const putData = { id: data.id, clientId: data.clientId };
-
- const hasRedirectUris = redirectUris.find(item =>
- item.includes(this.zevaHost)
- );
-
- if (hasRedirectUris) {
- putData.redirectUris = redirectUris.filter(
- item => !item.includes(this.zevaHost)
- );
- }
-
- if (hasRedirectUris) {
- this.api
- .put(this.zevaPublicClientPath, putData)
- .then(() => console.log("RedirectUri and WebOrigins removed."));
- } else {
- console.log("RedirectUri and WebOrigins remove skipped.");
- }
-
- }
-};
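The client is only exercised for the dev (pull-request) phase: deploy.js calls addUris() so Keycloak will accept redirects from the new PR host, and clean.js calls removeUris() when the environment is torn down. A minimal sketch of that lifecycle, assuming the settings and oc objects from the entry scripts earlier in this patch:

    // Sketch of the dev-phase lifecycle around KeyCloakClient; `settings`
    // and `oc` are the pipeline-cli objects from the surrounding scripts.
    const KeyCloakClient = require('./keycloak');

    async function syncRedirectUris(settings, oc, phase, deploying) {
      if (phase !== 'dev') return; // only PR environments touch Keycloak
      const kc = new KeyCloakClient(settings, oc);
      if (deploying) {
        await kc.addUris();    // register https://<pr-host>/* as a redirect URI
      } else {
        await kc.removeUris(); // drop it again on cleanup
      }
    }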
diff --git a/.pipeline-v3/npmw b/.pipeline-v3/npmw
deleted file mode 100755
index 1eed7c953..000000000
--- a/.pipeline-v3/npmw
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/sh
-set +x
-type -t nvm && nvm deactivate
-export NVM_DIR="$(git rev-parse --show-toplevel)/.nvm"
-if [ ! -f "$NVM_DIR/nvm.sh" ]; then
- mkdir -p "${NVM_DIR}"
- curl -sSL -o- https://raw.githubusercontent.com/creationix/nvm/v0.34.0/install.sh | bash &>/dev/null
-fi
-source "$NVM_DIR/nvm.sh" &>/dev/null
-METHOD=script nvm install --no-progress &>/dev/null
-nvm use &>/dev/null
-exec npm "$@"
diff --git a/.pipeline-v3/package-lock.json b/.pipeline-v3/package-lock.json
deleted file mode 100644
index 08d80ef00..000000000
--- a/.pipeline-v3/package-lock.json
+++ /dev/null
@@ -1,988 +0,0 @@
-{
- "name": "pipeline",
- "version": "1.0.0",
- "lockfileVersion": 1,
- "requires": true,
- "dependencies": {
- "@bcgov/gh-deploy": {
- "version": "1.1.4",
- "resolved": "https://registry.npmjs.org/@bcgov/gh-deploy/-/gh-deploy-1.1.4.tgz",
- "integrity": "sha512-O5XNCnbQouxaZbL8fkijpnh6odcWRhsymzSt6O0F3vC1uNY9scQoGf1fP6R4wllEgjgorrASQu7xi2fShsHikA==",
- "requires": {
- "@oclif/command": "^1.5.19",
- "@oclif/config": "^1.13.3",
- "@oclif/plugin-help": "^2.2.1",
- "@octokit/rest": "^16.43.1",
- "globby": "^10.0.1"
- }
- },
- "@bcgov/pipeline-cli": {
- "version": "1.0.1-0",
- "resolved": "https://registry.npmjs.org/@bcgov/pipeline-cli/-/pipeline-cli-1.0.1-0.tgz",
- "integrity": "sha512-DXneptaJuG81Vo+GotZaS4M78uOVnocCCzte6UghOkO+Bt8EQ6xlPblITPXiNDCufO7gOnEmB4T/pyh1ZBnvcw==",
- "requires": {
- "debug": "^4.1.0",
- "lodash.isempty": "^4.0.1",
- "lodash.isfunction": "^3.0.9",
- "lodash.isplainobject": "^4.0.6",
- "lodash.isstring": "^4.0.1",
- "snakecase-keys": "^3.1.0"
- }
- },
- "@nodelib/fs.scandir": {
- "version": "2.1.3",
- "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.3.tgz",
- "integrity": "sha512-eGmwYQn3gxo4r7jdQnkrrN6bY478C3P+a/y72IJukF8LjB6ZHeB3c+Ehacj3sYeSmUXGlnA67/PmbM9CVwL7Dw==",
- "requires": {
- "@nodelib/fs.stat": "2.0.3",
- "run-parallel": "^1.1.9"
- }
- },
- "@nodelib/fs.stat": {
- "version": "2.0.3",
- "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.3.tgz",
- "integrity": "sha512-bQBFruR2TAwoevBEd/NWMoAAtNGzTRgdrqnYCc7dhzfoNvqPzLyqlEQnzZ3kVnNrSp25iyxE00/3h2fqGAGArA=="
- },
- "@nodelib/fs.walk": {
- "version": "1.2.4",
- "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.4.tgz",
- "integrity": "sha512-1V9XOY4rDW0rehzbrcqAmHnz8e7SKvX27gh8Gt2WgB0+pdzdiLV83p72kZPU+jvMbS1qU5mauP2iOvO8rhmurQ==",
- "requires": {
- "@nodelib/fs.scandir": "2.1.3",
- "fastq": "^1.6.0"
- }
- },
- "@oclif/command": {
- "version": "1.5.19",
- "resolved": "https://registry.npmjs.org/@oclif/command/-/command-1.5.19.tgz",
- "integrity": "sha512-6+iaCMh/JXJaB2QWikqvGE9//wLEVYYwZd5sud8aLoLKog1Q75naZh2vlGVtg5Mq/NqpqGQvdIjJb3Bm+64AUQ==",
- "requires": {
- "@oclif/config": "^1",
- "@oclif/errors": "^1.2.2",
- "@oclif/parser": "^3.8.3",
- "@oclif/plugin-help": "^2",
- "debug": "^4.1.1",
- "semver": "^5.6.0"
- }
- },
- "@oclif/config": {
- "version": "1.14.0",
- "resolved": "https://registry.npmjs.org/@oclif/config/-/config-1.14.0.tgz",
- "integrity": "sha512-KsOP/mx9lzTah+EtGqLUXN3PDL0J3zb9/dTneFyiUK2K6T7vFEGhV6OasmqTh4uMZHGYTGrNPV8x/Yw6qZNL6A==",
- "requires": {
- "@oclif/errors": "^1.0.0",
- "@oclif/parser": "^3.8.0",
- "debug": "^4.1.1",
- "tslib": "^1.9.3"
- }
- },
- "@oclif/errors": {
- "version": "1.2.2",
- "resolved": "https://registry.npmjs.org/@oclif/errors/-/errors-1.2.2.tgz",
- "integrity": "sha512-Eq8BFuJUQcbAPVofDxwdE0bL14inIiwt5EaKRVY9ZDIG11jwdXZqiQEECJx0VfnLyUZdYfRd/znDI/MytdJoKg==",
- "requires": {
- "clean-stack": "^1.3.0",
- "fs-extra": "^7.0.0",
- "indent-string": "^3.2.0",
- "strip-ansi": "^5.0.0",
- "wrap-ansi": "^4.0.0"
- }
- },
- "@oclif/linewrap": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/@oclif/linewrap/-/linewrap-1.0.0.tgz",
- "integrity": "sha512-Ups2dShK52xXa8w6iBWLgcjPJWjais6KPJQq3gQ/88AY6BXoTX+MIGFPrWQO1KLMiQfoTpcLnUwloN4brrVUHw=="
- },
- "@oclif/parser": {
- "version": "3.8.4",
- "resolved": "https://registry.npmjs.org/@oclif/parser/-/parser-3.8.4.tgz",
- "integrity": "sha512-cyP1at3l42kQHZtqDS3KfTeyMvxITGwXwH1qk9ktBYvqgMp5h4vHT+cOD74ld3RqJUOZY/+Zi9lb4Tbza3BtuA==",
- "requires": {
- "@oclif/linewrap": "^1.0.0",
- "chalk": "^2.4.2",
- "tslib": "^1.9.3"
- }
- },
- "@oclif/plugin-help": {
- "version": "2.2.3",
- "resolved": "https://registry.npmjs.org/@oclif/plugin-help/-/plugin-help-2.2.3.tgz",
- "integrity": "sha512-bGHUdo5e7DjPJ0vTeRBMIrfqTRDBfyR5w0MP41u0n3r7YG5p14lvMmiCXxi6WDaP2Hw5nqx3PnkAIntCKZZN7g==",
- "requires": {
- "@oclif/command": "^1.5.13",
- "chalk": "^2.4.1",
- "indent-string": "^4.0.0",
- "lodash.template": "^4.4.0",
- "string-width": "^3.0.0",
- "strip-ansi": "^5.0.0",
- "widest-line": "^2.0.1",
- "wrap-ansi": "^4.0.0"
- },
- "dependencies": {
- "indent-string": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
- "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg=="
- },
- "string-width": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
- "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
- "requires": {
- "emoji-regex": "^7.0.1",
- "is-fullwidth-code-point": "^2.0.0",
- "strip-ansi": "^5.1.0"
- }
- }
- }
- },
- "@octokit/auth-token": {
- "version": "2.4.0",
- "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-2.4.0.tgz",
- "integrity": "sha512-eoOVMjILna7FVQf96iWc3+ZtE/ZT6y8ob8ZzcqKY1ibSQCnu4O/B7pJvzMx5cyZ/RjAff6DAdEb0O0Cjcxidkg==",
- "requires": {
- "@octokit/types": "^2.0.0"
- }
- },
- "@octokit/endpoint": {
- "version": "5.5.3",
- "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-5.5.3.tgz",
- "integrity": "sha512-EzKwkwcxeegYYah5ukEeAI/gYRLv2Y9U5PpIsseGSFDk+G3RbipQGBs8GuYS1TLCtQaqoO66+aQGtITPalxsNQ==",
- "requires": {
- "@octokit/types": "^2.0.0",
- "is-plain-object": "^3.0.0",
- "universal-user-agent": "^5.0.0"
- },
- "dependencies": {
- "universal-user-agent": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-5.0.0.tgz",
- "integrity": "sha512-B5TPtzZleXyPrUMKCpEHFmVhMN6EhmJYjG5PQna9s7mXeSqGTLap4OpqLl5FCEFUI3UBmllkETwKf/db66Y54Q==",
- "requires": {
- "os-name": "^3.1.0"
- }
- }
- }
- },
- "@octokit/plugin-paginate-rest": {
- "version": "1.1.2",
- "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-1.1.2.tgz",
- "integrity": "sha512-jbsSoi5Q1pj63sC16XIUboklNw+8tL9VOnJsWycWYR78TKss5PVpIPb1TUUcMQ+bBh7cY579cVAWmf5qG+dw+Q==",
- "requires": {
- "@octokit/types": "^2.0.1"
- }
- },
- "@octokit/plugin-request-log": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-1.0.0.tgz",
- "integrity": "sha512-ywoxP68aOT3zHCLgWZgwUJatiENeHE7xJzYjfz8WI0goynp96wETBF+d95b8g/uL4QmS6owPVlaxiz3wyMAzcw=="
- },
- "@octokit/plugin-rest-endpoint-methods": {
- "version": "2.4.0",
- "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-2.4.0.tgz",
- "integrity": "sha512-EZi/AWhtkdfAYi01obpX0DF7U6b1VRr30QNQ5xSFPITMdLSfhcBqjamE3F+sKcxPbD7eZuMHu3Qkk2V+JGxBDQ==",
- "requires": {
- "@octokit/types": "^2.0.1",
- "deprecation": "^2.3.1"
- }
- },
- "@octokit/request": {
- "version": "5.3.2",
- "resolved": "https://registry.npmjs.org/@octokit/request/-/request-5.3.2.tgz",
- "integrity": "sha512-7NPJpg19wVQy1cs2xqXjjRq/RmtSomja/VSWnptfYwuBxLdbYh2UjhGi0Wx7B1v5Iw5GKhfFDQL7jM7SSp7K2g==",
- "requires": {
- "@octokit/endpoint": "^5.5.0",
- "@octokit/request-error": "^1.0.1",
- "@octokit/types": "^2.0.0",
- "deprecation": "^2.0.0",
- "is-plain-object": "^3.0.0",
- "node-fetch": "^2.3.0",
- "once": "^1.4.0",
- "universal-user-agent": "^5.0.0"
- },
- "dependencies": {
- "universal-user-agent": {
- "version": "5.0.0",
- "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-5.0.0.tgz",
- "integrity": "sha512-B5TPtzZleXyPrUMKCpEHFmVhMN6EhmJYjG5PQna9s7mXeSqGTLap4OpqLl5FCEFUI3UBmllkETwKf/db66Y54Q==",
- "requires": {
- "os-name": "^3.1.0"
- }
- }
- }
- },
- "@octokit/request-error": {
- "version": "1.2.1",
- "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-1.2.1.tgz",
- "integrity": "sha512-+6yDyk1EES6WK+l3viRDElw96MvwfJxCt45GvmjDUKWjYIb3PJZQkq3i46TwGwoPD4h8NmTrENmtyA1FwbmhRA==",
- "requires": {
- "@octokit/types": "^2.0.0",
- "deprecation": "^2.0.0",
- "once": "^1.4.0"
- }
- },
- "@octokit/rest": {
- "version": "16.43.1",
- "resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-16.43.1.tgz",
- "integrity": "sha512-gfFKwRT/wFxq5qlNjnW2dh+qh74XgTQ2B179UX5K1HYCluioWj8Ndbgqw2PVqa1NnVJkGHp2ovMpVn/DImlmkw==",
- "requires": {
- "@octokit/auth-token": "^2.4.0",
- "@octokit/plugin-paginate-rest": "^1.1.1",
- "@octokit/plugin-request-log": "^1.0.0",
- "@octokit/plugin-rest-endpoint-methods": "2.4.0",
- "@octokit/request": "^5.2.0",
- "@octokit/request-error": "^1.0.2",
- "atob-lite": "^2.0.0",
- "before-after-hook": "^2.0.0",
- "btoa-lite": "^1.0.0",
- "deprecation": "^2.0.0",
- "lodash.get": "^4.4.2",
- "lodash.set": "^4.3.2",
- "lodash.uniq": "^4.5.0",
- "octokit-pagination-methods": "^1.1.0",
- "once": "^1.4.0",
- "universal-user-agent": "^4.0.0"
- }
- },
- "@octokit/types": {
- "version": "2.3.1",
- "resolved": "https://registry.npmjs.org/@octokit/types/-/types-2.3.1.tgz",
- "integrity": "sha512-rvJP1Y9A/+Cky2C3var1vsw3Lf5Rjn/0sojNl2AjCX+WbpIHYccaJ46abrZoIxMYnOToul6S9tPytUVkFI7CXQ==",
- "requires": {
- "@types/node": ">= 8"
- }
- },
- "@types/events": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/@types/events/-/events-3.0.0.tgz",
- "integrity": "sha512-EaObqwIvayI5a8dCzhFrjKzVwKLxjoG9T6Ppd5CEo07LRKfQ8Yokw54r5+Wq7FaBQ+yXRvQAYPrHwya1/UFt9g=="
- },
- "@types/glob": {
- "version": "7.1.1",
- "resolved": "https://registry.npmjs.org/@types/glob/-/glob-7.1.1.tgz",
- "integrity": "sha512-1Bh06cbWJUHMC97acuD6UMG29nMt0Aqz1vF3guLfG+kHHJhy3AyohZFFxYk2f7Q1SQIrNwvncxAE0N/9s70F2w==",
- "requires": {
- "@types/events": "*",
- "@types/minimatch": "*",
- "@types/node": "*"
- }
- },
- "@types/minimatch": {
- "version": "3.0.3",
- "resolved": "https://registry.npmjs.org/@types/minimatch/-/minimatch-3.0.3.tgz",
- "integrity": "sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA=="
- },
- "@types/node": {
- "version": "13.7.6",
- "resolved": "https://registry.npmjs.org/@types/node/-/node-13.7.6.tgz",
- "integrity": "sha512-eyK7MWD0R1HqVTp+PtwRgFeIsemzuj4gBFSQxfPHY5iMjS7474e5wq+VFgTcdpyHeNxyKSaetYAjdMLJlKoWqA=="
- },
- "ansi-regex": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
- "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg=="
- },
- "ansi-styles": {
- "version": "3.2.1",
- "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
- "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
- "requires": {
- "color-convert": "^1.9.0"
- }
- },
- "array-union": {
- "version": "2.1.0",
- "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
- "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw=="
- },
- "atob-lite": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/atob-lite/-/atob-lite-2.0.0.tgz",
- "integrity": "sha1-D+9a1G8b16hQLGVyfwNn1e5D1pY="
- },
- "axios": {
- "version": "0.21.1",
- "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.1.tgz",
- "integrity": "sha512-dKQiRHxGD9PPRIUNIWvZhPTPpl1rf/OxTYKsqKUDjBwYylTvV7SjSHJb9ratfyzM6wCdLCOYLzs73qpg5c4iGA==",
- "requires": {
- "follow-redirects": "^1.10.0"
- }
- },
- "balanced-match": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
- "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c="
- },
- "before-after-hook": {
- "version": "2.1.0",
- "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-2.1.0.tgz",
- "integrity": "sha512-IWIbu7pMqyw3EAJHzzHbWa85b6oud/yfKYg5rqB5hNE8CeMi3nX+2C2sj0HswfblST86hpVEOAb9x34NZd6P7A=="
- },
- "brace-expansion": {
- "version": "1.1.11",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
- "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
- "requires": {
- "balanced-match": "^1.0.0",
- "concat-map": "0.0.1"
- }
- },
- "braces": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz",
- "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==",
- "requires": {
- "fill-range": "^7.0.1"
- }
- },
- "btoa-lite": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/btoa-lite/-/btoa-lite-1.0.0.tgz",
- "integrity": "sha1-M3dm2hWAEhD92VbCLpxokaudAzc="
- },
- "chalk": {
- "version": "2.4.2",
- "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
- "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
- "requires": {
- "ansi-styles": "^3.2.1",
- "escape-string-regexp": "^1.0.5",
- "supports-color": "^5.3.0"
- }
- },
- "clean-stack": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-1.3.0.tgz",
- "integrity": "sha1-noIVAa6XmYbEax1m0tQy2y/UrjE="
- },
- "color-convert": {
- "version": "1.9.3",
- "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
- "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
- "requires": {
- "color-name": "1.1.3"
- }
- },
- "color-name": {
- "version": "1.1.3",
- "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
- "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU="
- },
- "concat-map": {
- "version": "0.0.1",
- "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
- "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s="
- },
- "cross-spawn": {
- "version": "6.0.5",
- "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz",
- "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==",
- "requires": {
- "nice-try": "^1.0.4",
- "path-key": "^2.0.1",
- "semver": "^5.5.0",
- "shebang-command": "^1.2.0",
- "which": "^1.2.9"
- }
- },
- "debug": {
- "version": "4.1.1",
- "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
- "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
- "requires": {
- "ms": "^2.1.1"
- }
- },
- "deprecation": {
- "version": "2.3.1",
- "resolved": "https://registry.npmjs.org/deprecation/-/deprecation-2.3.1.tgz",
- "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ=="
- },
- "dir-glob": {
- "version": "3.0.1",
- "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
- "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
- "requires": {
- "path-type": "^4.0.0"
- }
- },
- "emoji-regex": {
- "version": "7.0.3",
- "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
- "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA=="
- },
- "end-of-stream": {
- "version": "1.4.4",
- "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
- "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
- "requires": {
- "once": "^1.4.0"
- }
- },
- "escape-string-regexp": {
- "version": "1.0.5",
- "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
- "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ="
- },
- "execa": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/execa/-/execa-1.0.0.tgz",
- "integrity": "sha512-adbxcyWV46qiHyvSp50TKt05tB4tK3HcmF7/nxfAdhnox83seTDbwnaqKO4sXRy7roHAIFqJP/Rw/AuEbX61LA==",
- "requires": {
- "cross-spawn": "^6.0.0",
- "get-stream": "^4.0.0",
- "is-stream": "^1.1.0",
- "npm-run-path": "^2.0.0",
- "p-finally": "^1.0.0",
- "signal-exit": "^3.0.0",
- "strip-eof": "^1.0.0"
- }
- },
- "fast-glob": {
- "version": "3.2.2",
- "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.2.tgz",
- "integrity": "sha512-UDV82o4uQyljznxwMxyVRJgZZt3O5wENYojjzbaGEGZgeOxkLFf+V4cnUD+krzb2F72E18RhamkMZ7AdeggF7A==",
- "requires": {
- "@nodelib/fs.stat": "^2.0.2",
- "@nodelib/fs.walk": "^1.2.3",
- "glob-parent": "^5.1.0",
- "merge2": "^1.3.0",
- "micromatch": "^4.0.2",
- "picomatch": "^2.2.1"
- }
- },
- "fastq": {
- "version": "1.6.0",
- "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.6.0.tgz",
- "integrity": "sha512-jmxqQ3Z/nXoeyDmWAzF9kH1aGZSis6e/SbfPmJpUnyZ0ogr6iscHQaml4wsEepEWSdtmpy+eVXmCRIMpxaXqOA==",
- "requires": {
- "reusify": "^1.0.0"
- }
- },
- "fill-range": {
- "version": "7.0.1",
- "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz",
- "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==",
- "requires": {
- "to-regex-range": "^5.0.1"
- }
- },
- "follow-redirects": {
- "version": "1.13.1",
- "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.13.1.tgz",
- "integrity": "sha512-SSG5xmZh1mkPGyKzjZP8zLjltIfpW32Y5QpdNJyjcfGxK3qo3NDDkZOZSFiGn1A6SclQxY9GzEwAHQ3dmYRWpg=="
- },
- "fs-extra": {
- "version": "7.0.1",
- "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz",
- "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==",
- "requires": {
- "graceful-fs": "^4.1.2",
- "jsonfile": "^4.0.0",
- "universalify": "^0.1.0"
- }
- },
- "fs.realpath": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
- "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8="
- },
- "get-stream": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz",
- "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==",
- "requires": {
- "pump": "^3.0.0"
- }
- },
- "glob": {
- "version": "7.1.6",
- "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
- "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
- "requires": {
- "fs.realpath": "^1.0.0",
- "inflight": "^1.0.4",
- "inherits": "2",
- "minimatch": "^3.0.4",
- "once": "^1.3.0",
- "path-is-absolute": "^1.0.0"
- }
- },
- "glob-parent": {
- "version": "5.1.0",
- "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.0.tgz",
- "integrity": "sha512-qjtRgnIVmOfnKUE3NJAQEdk+lKrxfw8t5ke7SXtfMTHcjsBfOfWXCQfdb30zfDoZQ2IRSIiidmjtbHZPZ++Ihw==",
- "requires": {
- "is-glob": "^4.0.1"
- }
- },
- "globby": {
- "version": "10.0.2",
- "resolved": "https://registry.npmjs.org/globby/-/globby-10.0.2.tgz",
- "integrity": "sha512-7dUi7RvCoT/xast/o/dLN53oqND4yk0nsHkhRgn9w65C4PofCLOoJ39iSOg+qVDdWQPIEj+eszMHQ+aLVwwQSg==",
- "requires": {
- "@types/glob": "^7.1.1",
- "array-union": "^2.1.0",
- "dir-glob": "^3.0.1",
- "fast-glob": "^3.0.3",
- "glob": "^7.1.3",
- "ignore": "^5.1.1",
- "merge2": "^1.2.3",
- "slash": "^3.0.0"
- }
- },
- "graceful-fs": {
- "version": "4.2.3",
- "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.3.tgz",
- "integrity": "sha512-a30VEBm4PEdx1dRB7MFK7BejejvCvBronbLjht+sHuGYj8PHs7M/5Z+rt5lw551vZ7yfTCj4Vuyy3mSJytDWRQ=="
- },
- "has-flag": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
- "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0="
- },
- "ignore": {
- "version": "5.1.4",
- "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.4.tgz",
- "integrity": "sha512-MzbUSahkTW1u7JpKKjY7LCARd1fU5W2rLdxlM4kdkayuCwZImjkpluF9CM1aLewYJguPDqewLam18Y6AU69A8A=="
- },
- "indent-string": {
- "version": "3.2.0",
- "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz",
- "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok="
- },
- "inflight": {
- "version": "1.0.6",
- "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
- "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
- "requires": {
- "once": "^1.3.0",
- "wrappy": "1"
- }
- },
- "inherits": {
- "version": "2.0.4",
- "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
- "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
- },
- "is-extglob": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
- "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI="
- },
- "is-fullwidth-code-point": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
- "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8="
- },
- "is-glob": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
- "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
- "requires": {
- "is-extglob": "^2.1.1"
- }
- },
- "is-number": {
- "version": "7.0.0",
- "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
- "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="
- },
- "is-plain-object": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-3.0.0.tgz",
- "integrity": "sha512-tZIpofR+P05k8Aocp7UI/2UTa9lTJSebCXpFFoR9aibpokDj/uXBsJ8luUu0tTVYKkMU6URDUuOfJZ7koewXvg==",
- "requires": {
- "isobject": "^4.0.0"
- }
- },
- "is-stream": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz",
- "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ="
- },
- "isexe": {
- "version": "2.0.0",
- "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
- "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA="
- },
- "isobject": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/isobject/-/isobject-4.0.0.tgz",
- "integrity": "sha512-S/2fF5wH8SJA/kmwr6HYhK/RI/OkhD84k8ntalo0iJjZikgq1XFvR5M8NPT1x5F7fBwCG3qHfnzeP/Vh/ZxCUA=="
- },
- "jsonfile": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
- "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
- "requires": {
- "graceful-fs": "^4.1.6"
- }
- },
- "lodash": {
- "version": "4.17.20",
- "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.20.tgz",
- "integrity": "sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA=="
- },
- "lodash._reinterpolate": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/lodash._reinterpolate/-/lodash._reinterpolate-3.0.0.tgz",
- "integrity": "sha1-DM8tiRZq8Ds2Y8eWU4t1rG4RTZ0="
- },
- "lodash.get": {
- "version": "4.4.2",
- "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz",
- "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk="
- },
- "lodash.isempty": {
- "version": "4.4.0",
- "resolved": "https://registry.npmjs.org/lodash.isempty/-/lodash.isempty-4.4.0.tgz",
- "integrity": "sha1-b4bL7di+TsmHvpqvM8loTbGzHn4="
- },
- "lodash.isfunction": {
- "version": "3.0.9",
- "resolved": "https://registry.npmjs.org/lodash.isfunction/-/lodash.isfunction-3.0.9.tgz",
- "integrity": "sha512-AirXNj15uRIMMPihnkInB4i3NHeb4iBtNg9WRWuK2o31S+ePwwNmDPaTL3o7dTJ+VXNZim7rFs4rxN4YU1oUJw=="
- },
- "lodash.isplainobject": {
- "version": "4.0.6",
- "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
- "integrity": "sha1-fFJqUtibRcRcxpC4gWO+BJf1UMs="
- },
- "lodash.isstring": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/lodash.isstring/-/lodash.isstring-4.0.1.tgz",
- "integrity": "sha1-1SfftUVuynzJu5XV2ur4i6VKVFE="
- },
- "lodash.set": {
- "version": "4.3.2",
- "resolved": "https://registry.npmjs.org/lodash.set/-/lodash.set-4.3.2.tgz",
- "integrity": "sha1-2HV7HagH3eJIFrDWqEvqGnYjCyM="
- },
- "lodash.template": {
- "version": "4.5.0",
- "resolved": "https://registry.npmjs.org/lodash.template/-/lodash.template-4.5.0.tgz",
- "integrity": "sha512-84vYFxIkmidUiFxidA/KjjH9pAycqW+h980j7Fuz5qxRtO9pgB7MDFTdys1N7A5mcucRiDyEq4fusljItR1T/A==",
- "requires": {
- "lodash._reinterpolate": "^3.0.0",
- "lodash.templatesettings": "^4.0.0"
- }
- },
- "lodash.templatesettings": {
- "version": "4.2.0",
- "resolved": "https://registry.npmjs.org/lodash.templatesettings/-/lodash.templatesettings-4.2.0.tgz",
- "integrity": "sha512-stgLz+i3Aa9mZgnjr/O+v9ruKZsPsndy7qPZOchbqk2cnTU1ZaldKK+v7m54WoKIyxiuMZTKT2H81F8BeAc3ZQ==",
- "requires": {
- "lodash._reinterpolate": "^3.0.0"
- }
- },
- "lodash.uniq": {
- "version": "4.5.0",
- "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
- "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M="
- },
- "macos-release": {
- "version": "2.3.0",
- "resolved": "https://registry.npmjs.org/macos-release/-/macos-release-2.3.0.tgz",
- "integrity": "sha512-OHhSbtcviqMPt7yfw5ef5aghS2jzFVKEFyCJndQt2YpSQ9qRVSEv2axSJI1paVThEu+FFGs584h/1YhxjVqajA=="
- },
- "map-obj": {
- "version": "4.1.0",
- "resolved": "https://registry.npmjs.org/map-obj/-/map-obj-4.1.0.tgz",
- "integrity": "sha512-glc9y00wgtwcDmp7GaE/0b0OnxpNJsVf3ael/An6Fe2Q51LLwN1er6sdomLRzz5h0+yMpiYLhWYF5R7HeqVd4g=="
- },
- "merge2": {
- "version": "1.3.0",
- "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.3.0.tgz",
- "integrity": "sha512-2j4DAdlBOkiSZIsaXk4mTE3sRS02yBHAtfy127xRV3bQUFqXkjHCHLW6Scv7DwNRbIWNHH8zpnz9zMaKXIdvYw=="
- },
- "micromatch": {
- "version": "4.0.2",
- "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.2.tgz",
- "integrity": "sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q==",
- "requires": {
- "braces": "^3.0.1",
- "picomatch": "^2.0.5"
- }
- },
- "minimatch": {
- "version": "3.0.4",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
- "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
- "requires": {
- "brace-expansion": "^1.1.7"
- }
- },
- "ms": {
- "version": "2.1.2",
- "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
- "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
- },
- "nice-try": {
- "version": "1.0.5",
- "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz",
- "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ=="
- },
- "node-fetch": {
- "version": "2.6.1",
- "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz",
- "integrity": "sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw=="
- },
- "npm-run-path": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-2.0.2.tgz",
- "integrity": "sha1-NakjLfo11wZ7TLLd8jV7GHFTbF8=",
- "requires": {
- "path-key": "^2.0.0"
- }
- },
- "octokit-pagination-methods": {
- "version": "1.1.0",
- "resolved": "https://registry.npmjs.org/octokit-pagination-methods/-/octokit-pagination-methods-1.1.0.tgz",
- "integrity": "sha512-fZ4qZdQ2nxJvtcasX7Ghl+WlWS/d9IgnBIwFZXVNNZUmzpno91SX5bc5vuxiuKoCtK78XxGGNuSCrDC7xYB3OQ=="
- },
- "once": {
- "version": "1.4.0",
- "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
- "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
- "requires": {
- "wrappy": "1"
- }
- },
- "os-name": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/os-name/-/os-name-3.1.0.tgz",
- "integrity": "sha512-h8L+8aNjNcMpo/mAIBPn5PXCM16iyPGjHNWo6U1YO8sJTMHtEtyczI6QJnLoplswm6goopQkqc7OAnjhWcugVg==",
- "requires": {
- "macos-release": "^2.2.0",
- "windows-release": "^3.1.0"
- }
- },
- "p-finally": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz",
- "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4="
- },
- "path-is-absolute": {
- "version": "1.0.1",
- "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
- "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18="
- },
- "path-key": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz",
- "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A="
- },
- "path-type": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
- "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw=="
- },
- "picomatch": {
- "version": "2.2.1",
- "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.1.tgz",
- "integrity": "sha512-ISBaA8xQNmwELC7eOjqFKMESB2VIqt4PPDD0nsS95b/9dZXvVKOlz9keMSnoGGKcOHXfTvDD6WMaRoSc9UuhRA=="
- },
- "pump": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
- "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
- "requires": {
- "end-of-stream": "^1.1.0",
- "once": "^1.3.1"
- }
- },
- "reusify": {
- "version": "1.0.4",
- "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
- "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw=="
- },
- "run-parallel": {
- "version": "1.1.9",
- "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.1.9.tgz",
- "integrity": "sha512-DEqnSRTDw/Tc3FXf49zedI638Z9onwUotBMiUFKmrO2sdFKIbXamXGQ3Axd4qgphxKB4kw/qP1w5kTxnfU1B9Q=="
- },
- "semver": {
- "version": "5.7.1",
- "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
- "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ=="
- },
- "shebang-command": {
- "version": "1.2.0",
- "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz",
- "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=",
- "requires": {
- "shebang-regex": "^1.0.0"
- }
- },
- "shebang-regex": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz",
- "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM="
- },
- "signal-exit": {
- "version": "3.0.2",
- "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz",
- "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0="
- },
- "slash": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
- "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q=="
- },
- "snakecase-keys": {
- "version": "3.1.0",
- "resolved": "https://registry.npmjs.org/snakecase-keys/-/snakecase-keys-3.1.0.tgz",
- "integrity": "sha512-QM038drLbhdOY5HcRQVjO1ZJ1WR7yV5D5TIBzcOB/g3f5HURHhfpYEnvOyzXet8K+MQsgeIUA7O7vn90nAX6EA==",
- "requires": {
- "map-obj": "^4.0.0",
- "to-snake-case": "^1.0.0"
- }
- },
- "string-width": {
- "version": "2.1.1",
- "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz",
- "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==",
- "requires": {
- "is-fullwidth-code-point": "^2.0.0",
- "strip-ansi": "^4.0.0"
- },
- "dependencies": {
- "ansi-regex": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
- "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg="
- },
- "strip-ansi": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
- "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
- "requires": {
- "ansi-regex": "^3.0.0"
- }
- }
- }
- },
- "strip-ansi": {
- "version": "5.2.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
- "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
- "requires": {
- "ansi-regex": "^4.1.0"
- }
- },
- "strip-eof": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/strip-eof/-/strip-eof-1.0.0.tgz",
- "integrity": "sha1-u0P/VZim6wXYm1n80SnJgzE2Br8="
- },
- "supports-color": {
- "version": "5.5.0",
- "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
- "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
- "requires": {
- "has-flag": "^3.0.0"
- }
- },
- "to-no-case": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/to-no-case/-/to-no-case-1.0.2.tgz",
- "integrity": "sha1-xyKQcWTvaxeBMsjmmTAhLRtKoWo="
- },
- "to-regex-range": {
- "version": "5.0.1",
- "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
- "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
- "requires": {
- "is-number": "^7.0.0"
- }
- },
- "to-snake-case": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/to-snake-case/-/to-snake-case-1.0.0.tgz",
- "integrity": "sha1-znRpE4l5RgGah+Yu366upMYIq4w=",
- "requires": {
- "to-space-case": "^1.0.0"
- }
- },
- "to-space-case": {
- "version": "1.0.0",
- "resolved": "https://registry.npmjs.org/to-space-case/-/to-space-case-1.0.0.tgz",
- "integrity": "sha1-sFLar7Gysp3HcM6gFj5ewOvJ/Bc=",
- "requires": {
- "to-no-case": "^1.0.0"
- }
- },
- "tslib": {
- "version": "1.11.0",
- "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.11.0.tgz",
- "integrity": "sha512-BmndXUtiTn/VDDrJzQE7Mm22Ix3PxgLltW9bSNLoeCY31gnG2OPx0QqJnuc9oMIKioYrz487i6K9o4Pdn0j+Kg=="
- },
- "universal-user-agent": {
- "version": "4.0.1",
- "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-4.0.1.tgz",
- "integrity": "sha512-LnST3ebHwVL2aNe4mejI9IQh2HfZ1RLo8Io2HugSif8ekzD1TlWpHpColOB/eh8JHMLkGH3Akqf040I+4ylNxg==",
- "requires": {
- "os-name": "^3.1.0"
- }
- },
- "universalify": {
- "version": "0.1.2",
- "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
- "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
- },
- "which": {
- "version": "1.3.1",
- "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
- "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
- "requires": {
- "isexe": "^2.0.0"
- }
- },
- "widest-line": {
- "version": "2.0.1",
- "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-2.0.1.tgz",
- "integrity": "sha512-Ba5m9/Fa4Xt9eb2ELXt77JxVDV8w7qQrH0zS/TWSJdLyAwQjWoOzpzj5lwVftDz6n/EOu3tNACS84v509qwnJA==",
- "requires": {
- "string-width": "^2.1.1"
- }
- },
- "windows-release": {
- "version": "3.2.0",
- "resolved": "https://registry.npmjs.org/windows-release/-/windows-release-3.2.0.tgz",
- "integrity": "sha512-QTlz2hKLrdqukrsapKsINzqMgOUpQW268eJ0OaOpJN32h272waxR9fkB9VoWRtK7uKHG5EHJcTXQBD8XZVJkFA==",
- "requires": {
- "execa": "^1.0.0"
- }
- },
- "wrap-ansi": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-4.0.0.tgz",
- "integrity": "sha512-uMTsj9rDb0/7kk1PbcbCcwvHUxp60fGDB/NNXpVa0Q+ic/e7y5+BwTxKfQ33VYgDppSwi/FBzpetYzo8s6tfbg==",
- "requires": {
- "ansi-styles": "^3.2.0",
- "string-width": "^2.1.1",
- "strip-ansi": "^4.0.0"
- },
- "dependencies": {
- "ansi-regex": {
- "version": "3.0.0",
- "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz",
- "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg="
- },
- "strip-ansi": {
- "version": "4.0.0",
- "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz",
- "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=",
- "requires": {
- "ansi-regex": "^3.0.0"
- }
- }
- }
- },
- "wrappy": {
- "version": "1.0.2",
- "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
- "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8="
- }
- }
-}
diff --git a/.pipeline-v3/package.json b/.pipeline-v3/package.json
deleted file mode 100644
index 6834ee3e6..000000000
--- a/.pipeline-v3/package.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
- "name": "pipeline",
- "version": "1.0.0",
- "description": "This a pipeliene script",
- "engines": {
- "node": ">=8"
- },
- "scripts": {
- "build": "node build.js",
- "clean": "node clean.js",
- "deploy": "node deploy.js",
- "deploy-unittest": "node deploy-unittest.js",
- "version": "echo \"node@$(node --version) ($(which node))\" && echo \"npm@$(npm --version) ($(which npm))\" && npm ls"
- },
- "repository": {
- "type": "git",
- "url": "git+https://github.com/bcgov/ocp-sso.git"
- },
- "author": "",
- "license": "Apache-2.0",
- "dependencies": {
- "@bcgov/gh-deploy": "^1.1.4",
- "@bcgov/pipeline-cli": "^1.0.1-0",
- "axios": "^0.21.1",
- "lodash": "^4.17.20"
- }
-}
diff --git a/.yo-rc.json-v3 b/.yo-rc.json-v3
deleted file mode 100644
index b592ba595..000000000
--- a/.yo-rc.json-v3
+++ /dev/null
@@ -1,54 +0,0 @@
-{
- "@bcgov/bcdk": {
- "promptValues": {
- "modules": {
- "zeva": {
- "name": "zeva",
- "version": "1.0.0",
- "path": ".",
- "environments": {
- "build": {
- "namespace": "tbiwaq-tools"
- },
- "dev": {
- "namespace": "tbiwaq-dev"
- },
- "test": {
- "namespace": "tbiwaq-test"
- },
- "prod": {
- "namespace": "tbiwaq-prod"
- }
- },
- "jenkinsJobName": "zeva",
- "github_owner": "bcgov",
- "github_repo": "zeva",
- "jenkinsFilePath": "Jenkinsfile",
- "uuid": "2c0c5c5b-6d52-4ae1-aefb-00b5bbd7c7e3"
- },
- "jenkins": {
- "path": ".jenkins",
- "name": "jenkins",
- "namespace": "tbiwaq-tools",
- "version": "1.0.0",
- "environments": {
- "build": {
- "namespace": "tbiwaq-tools"
- },
- "dev": {
- "namespace": "tbiwaq-tools"
- },
- "prod": {
- "namespace": "tbiwaq-tools"
- }
- },
- "jenkinsJobName": "_jenkins",
- "github_owner": "bcgov",
- "github_repo": "zeva",
- "jenkinsFilePath": ".jenkins/Jenkinsfile",
- "uuid": "7a55c783-db16-45ad-82cc-23792286d6c3"
- }
- }
- }
- }
-}
\ No newline at end of file
diff --git a/Jenkinsfile-v3 b/Jenkinsfile-v3
deleted file mode 100644
index dbb0f12cd..000000000
--- a/Jenkinsfile-v3
+++ /dev/null
@@ -1,67 +0,0 @@
-pipeline {
- agent none
- options {
- disableResume()
- }
- stages {
- stage('Build') {
- agent { label 'build' }
- steps {
- script {
- def filesInThisCommitAsString = sh(script:"git diff --name-only HEAD~1..HEAD | grep -v '^.jenkins-v3/' || echo -n ''", returnStatus: false, returnStdout: true).trim()
- def hasChangesInPath = (filesInThisCommitAsString.length() > 0)
- echo "${filesInThisCommitAsString}"
- if (!currentBuild.rawBuild.getCauses()[0].toString().contains('UserIdCause') && !hasChangesInPath){
- currentBuild.rawBuild.delete()
- error("No changes detected in the path ('^.jenkins-v3/')")
- }
- }
- echo "Aborting all running jobs ..."
- script {
- abortAllPreviousBuildInProgress(currentBuild)
- }
- echo "Building ..."
- sh "cd .pipeline-v3 && ./npmw ci && ./npmw run build -- --pr=${CHANGE_ID}"
- }
- }
- stage('Deploy (DEV)') {
- agent { label 'deploy' }
- steps {
- echo "Deploying ..."
- sh "cd .pipeline-v3 && ./npmw ci && ./npmw run deploy -- --pr=${CHANGE_ID} --env=dev"
- }
- }
-
- stage('Deploy (TEST)') {
- agent { label 'deploy' }
- when {
- expression { return env.CHANGE_TARGET == 'master';}
- beforeInput true
- }
- input {
- message "Should we continue with deployment to TEST?"
- ok "Yes!"
- }
- steps {
- echo "Deploying ..."
- sh "cd .pipeline-v3 && ./npmw ci && ./npmw run deploy -- --pr=${CHANGE_ID} --env=test"
- }
- }
- stage('Deploy (PROD)') {
- agent { label 'deploy' }
- when {
- expression { return env.CHANGE_TARGET == 'master';}
- beforeInput true
- }
- input {
- message "Should we continue with deployment to PROD? This is Prod!. Make sure you have made a database backup and verified no clients are using the system."
- ok "Yes!"
- }
- steps {
- echo "Deploying ..."
- sh "cd .pipeline-v3 && ./npmw ci && ./npmw run deploy -- --pr=${CHANGE_ID} --env=prod"
- }
- }
-
- }
-}
diff --git a/openshift-v3/README.md b/openshift-v3/README.md
deleted file mode 100644
index 25aca8826..000000000
--- a/openshift-v3/README.md
+++ /dev/null
@@ -1,300 +0,0 @@
-# Zeva Pull Request based Pipeline
-
-This readme file shows the process of adopting [BCDK](https://github.com/bcdevops/bcdk) as a pull request based pipeline for the [Zeva](https://github.com/bcgov/zeva) project.
-
-The sample component built and deployed by the pipeline is the frontend.
-
-Create a branch called zeva-bcdk from master; all of the following work is based on this branch.
-
-## Section 1 Create pull request based pipeline
-
-A folder called .pipeline will be created under the project root.
-If all steps go through smoothly, it will have the following structure,
-and commands to build images and deploy to the various environments will be available at the end.
-
-```
--.pipeline
- -lib
- build.js
- clean.js
- config.js
- deploy.js
- -node_modules
- -@bcgov
- -pipeline-cli //https://github.com/BCDevOps/pipeline-cli
- ... //various nodejs modules
- .nvmrc
- build.js
- clean.js
- deploy.js
- npmw
- package.json
- package-lock.json
-```
-
-### 1.1 Create .pipeline folder
-
-Run the Yeoman generator to create the .pipeline folder structure.
-Zeva has only one module, with various components (frontend, backend, etc.) under it. The pipeline created will build and deploy the zeva module. If your project has multiple modules and each module has its own pipeline, please ask the BCDK developers for help.
-
-```
-~/Projects/zeva$ yo bcdk:pipeline
-? What is this module id/key? zeva
-? What is this module name? zeva
-? What is this module version? 1.0.0
-? What environments are supported by your app? separated by space build dev test prod
-? What is the source code directory for this module? .
-? What namespace/project name is used for 'build'? tbiwaq-tools
-? What namespace/project name is used for 'dev'? tbiwaq-dev
-? What namespace/project name is used for 'test'? tbiwaq-test
-? What namespace/project name is used for 'prod'? tbiwaq-prod
-```
-
-### 1.2 Create frontend build and deploy template for Openshift
-
-* [openshift/templates/frontend/frontend-bc.yaml](https://github.com/bcgov/zeva/blob/master/openshift/templates/frontend/frontend-bc.yaml)
-* [openshift/templates/frontend/frontend-dc.yaml](https://github.com/bcgov/zeva/blob/master/openshift/templates/frontend/frontend-dc.yaml)
-
-
-### 1.3 Customize frontend build process in pipeline
-
-Update .pipeline/lib/build.js at line 14. The values of the params are defined in .pipeline/lib/config.js. The current phase is build, and new values can be added to config.js; a minimal sketch of the config.js shape follows the snippet below.
-```
- // The building of your cool app goes here ▼▼▼
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-bc.yaml`, {
- 'param':{
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'GIT_URL': oc.git.http_url,
- 'GIT_REF': oc.git.ref
- }
- }))
-```
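-
-For reference, a minimal sketch of the shape of the generated .pipeline/lib/config.js (the namespaces, fields and values here are illustrative; extra fields such as ssoSuffix are added the same way):
-```
-'use strict';
-const options = require('@bcgov/pipeline-cli').Util.parseArguments();
-const changeId = options.pr; // the pull request number, e.g. 18
-const name = 'zeva';
-const version = '1.0.0';
-
-// Each phase carries the values consumed as phases[phase].name,
-// phases[phase].suffix, phases[phase].tag, etc. in build.js and deploy.js.
-const phases = {
-  build: { namespace: 'tbiwaq-tools', name: name, phase: 'build', changeId: changeId,
-    suffix: `-build-${changeId}`, tag: `build-${version}-${changeId}` },
-  dev: { namespace: 'tbiwaq-dev', name: name, phase: 'dev', changeId: changeId,
-    suffix: `-dev-${changeId}`, tag: `dev-${version}-${changeId}`,
-    instance: `${name}-dev-${changeId}` },
-};
-
-module.exports = exports = { phases, options };
-```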
-Save the file, commit all changes, push to GitHub and create a pull request from zeva-bcdk to master. Assume the pull request number is #18.
-
-### 1.4 Build the pull request on the command line
-
-Build config zeva-frontend-build-18 will be created under the tools namespace.
-```
-~/Projects/zeva/.pipeline$ npm run build -- --pr=18
-```
-
-### 1.5 Customize deploy process in pipeline
-
-Update .pipeline/lib/deploy.js at line 15. The values of the params also come from .pipeline/lib/config.js.
-
-```
- // The deployment of your cool app goes here ▼▼▼
- objects = objects.concat(oc.processDeploymentTemplate(`${templatesLocalBaseUrl}/templates/frontend/frontend-dc.yaml`, {
- 'param':{
- 'NAME': phases[phase].name,
- 'SUFFIX': phases[phase].suffix,
- 'VERSION': phases[phase].tag,
- 'ENV_NAME': phases[phase].instance,
- 'DASH_ENV_NAME': phases[phase].ssoSuffix,
- 'CPU_REQUEST': '100m',
- 'CPU_LIMIT': '500m',
- 'MEMORY_REQUEST': '1100M',
- 'MEMORY_LIMIT': '2G'
- }
- }))
-```
-
-### 1.6 Deploy the pull request to dev on command line
-
-Deployment config zeva-frontend-dev-18 will be created under the dev namespace.
-```
-~/Projects/zeva/.pipeline$ npm run deploy -- --pr=18 --env=dev
-```
-
-### 1.7 Cleanup deployment configurations created for the pull request
-
-The deployment configs, services, routes and image tags related to the pull request will be removed.
-It is recommended to run the cleanup command if redeployment of a pull request encounters an issue.
-```
-~/Projects/zeva/.pipeline$ npm run clean -- --pr=18 --env=dev
-```
-
-## Section 2 Setup Jenkins on Openshift
-
-Create Jenkins master and slave instances on Openshift, and create the Zeva pipeline job on Jenkins. The pipeline job scans pull requests when they are created/modified and triggers the pipeline as described in the Jenkinsfile under the project root.
-If all steps go through smoothly, the following folder structure will be created under the project root folder.
-The contents under .jenkins/.pipeline are similar to section 1. The idea is to use the same pipeline to maintain Jenkins itself for the project.
-The project team can customize Jenkins by adding/changing the contents under the .jenkins/docker and .jenkins/openshift folders. The Jenkins slave image, especially, can be customized to add extra modules.
-A Jenkinsfile will also be created under the project root.
-```
--.jenkins
- -.pipeline
- -lib
- build.js
- clean.js
- config.js
- deploy.js
- -node_modules
- -@bcgov
- -pipeline-cli //https://github.com/BCDevOps/pipeline-cli
- ... //various nodejs modules
- .nvmrc
- build.js
- clean.js
- deploy.js
- npmw
- package.json
- package-lock.json
- -docker
- -contrib
- -jenkins
- -configuration
- -jobs
- _jenkins
- config.xml
- _zeva
- config.xml
- Dockerfile
- -openshift
- build-master.yaml
- build-slave.yaml
- deploy-master.yaml
- deploy-prereq.yaml
- deploy-slave.yaml
- secrets.json
- Jenkinsfile
- README.md
-```
-
-### 2.1 Create .jenkins folder
-
-Run the Yeoman generator to create the .jenkins folder structure.
-Jenkins only has build, dev and prod environments, and they are all under the tools project. The Jenkins dev deployment should be short-lived: once it is verified OK, it should be removed. Jenkins prod is the one used to scan, build and deploy pull requests.
-
-```
-~/Projects/zeva$ yo bcdk:jenkins
-? What is your openshift 'tools' namespace name? tbiwaq-tools
-? What is this module id/key? jenkins
-? What is this module name? jenkins
-? What is this module version? 1.0.0
-Environments: build dev prod
-? What is the source code directory for this module? .jenkins
-? What namespace/project name is used for 'build'? tbiwaq-tools
-? What namespace/project name is used for 'dev'? tbiwaq-tools
-? What namespace/project name is used for 'prod'? tbiwaq-tools
-? What is the GitHub organization where the repository is located? bcgov
-? What is the repository's name? zeva
-? What is the Jenkinsfile path? .jenkins/Jenkinsfile
-Writing 'jenkins' files.
-Writing 'pipeline' files.
-Writing 'jenkins-job' files.
-Writing 'jenkins-overwrites' files.
- create .jenkins/docker/contrib/jenkins/configuration/jobs/_jenkins/config.xml
- create .jenkins/docker/Dockerfile
- create .jenkins/openshift/build-master.yaml
-```
-
-Commit the .jenkins folder; yes, only a commit is required for now.
-
-### 2.2 Build Jenkins master and slave images on Openshift
-
-Just provide the value 0 for pr. Two image streams and two build configs will be created for the Jenkins master and slave.
-
-```
-~/Projects/zeva/.jenkins/.pipeline$ npm run build -- --pr=0 --dev-mode=true
-> pipeline@1.0.0 build /Users/kfan/Projects/zeva/.jenkins/.pipeline
-> node build.js "--pr=0" "--dev-mode=true"
-
-Starting new build for buildconfig.build.openshift.io/jenkins-build-0
-Starting new build for buildconfig.build.openshift.io/jenkins-slave-main-build-0
-```
-
-### 2.3 Deploy Jenkins master and slave on Openshift
-
-Make sure the proper network security policies have been applied; otherwise the slave node will not be able to connect to the master. A sketch of such a policy follows the command below.
-pr#0 doesn't have to exist.
-
-```
-~/Projects/zeva/.jenkins/.pipeline$ npm run deploy -- --pr=0 --env=dev
-```
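-
-A minimal sketch of such a policy, assuming the Aporeto-based NetworkSecurityPolicy custom resource used on the BC Gov Openshift v3 clusters (the name here is illustrative):
-```
-apiVersion: security.devops.gov.bc.ca/v1alpha1
-kind: NetworkSecurityPolicy
-metadata:
-  name: intra-namespace-comms-tools
-spec:
-  description: allow pods in the tools namespace to talk to each other (slave -> master)
-  source:
-    - - $namespace=tbiwaq-tools
-  destination:
-    - - $namespace=tbiwaq-tools
-```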
-
-### 2.4 Create Zeva pipeline job in Jenkins
-
-The pipeline job will scan pull requests, then run the Jenkinsfile under the project root to build and deploy the identified pull requests.
-
-```
-~/Projects/zeva$ yo bcdk:jenkins-job
-? Module name? zeva
-? Jenkins Job name? zeva
-? What is the GitHub organization where the repository is located? bcgov
-? What is the repository's name? zeva
-? What is the Jenkinsfile path? Jenkinsfile
-Writing 'jenkins-job' files.
- create .jenkins/docker/contrib/jenkins/configuration/jobs/zeva/config.xml
-```
-
-### 2.5 Rebuild Jenkins to include the new pipeline job created
-
-If the builds do not start, manually delete the two Jenkins image streams and rerun the command.
-```
-~/Projects/zeva/.jenkins/.pipeline$ npm run build -- --pr=0 --dev-mode=true
-
-> pipeline@1.0.0 build /Users/kfan/Projects/zeva/.jenkins/.pipeline
-> node build.js "--pr=0" "--dev-mode=true"
-
-Re-using image tbiwaq-tools/ImageStreamImage/jenkins@sha256:19562b7307e461430fc4fc950c5c72d3300dc0826c5c7ac2dcd0ca289b5d2866 for build buildconfig.build.openshift.io/jenkins-build-0
-Re-using image tbiwaq-tools/ImageStreamImage/jenkins-slave-main@sha256:56afeca0b6ea96d330a5a60447d8d8558535dc901ca9f7ad6590434335026db9 for build buildconfig.build.openshift.io/jenkins-slave-main-build-0
-```
-Commit all changes and push to GitHub.
-
-### 2.6 Redeploy Jenkins with the new zeva pipeline job included
-```
-~/Projects/zeva/.jenkins/.pipeline$ npm run deploy -- --pr=0 --env=dev
-```
-
-## Section 3 Update Jenkins on Openshift
-A sample task is adding openshift cloud settings to .jenkins/docker/contrib/jenkins/configuration/config.xml; the settings are along the lines of the following (element names as used by the Jenkins kubernetes plugin).
-The change is done through branch jenkins-add-oepnshift.
-```
-<org.csanchez.jenkins.plugins.kubernetes.KubernetesCloud>
-  <name>openshift</name>
-  <skipTlsVerify>false</skipTlsVerify>
-  <addMasterProxyEnvVars>false</addMasterProxyEnvVars>
-  <capOnlyOnAlivePods>false</capOnlyOnAlivePods>
-  <jenkinsUrl>http://jenkins-prod.tbiwaq-tools.svc:8080</jenkinsUrl>
-  <jenkinsTunnel>jenkins-prod.tbiwaq-tools.svc:50000</jenkinsTunnel>
-  <containerCap>10</containerCap>
-  <retentionTimeout>5</retentionTimeout>
-  <connectTimeout>5</connectTimeout>
-  <readTimeout>15</readTimeout>
-  <usageRestricted>false</usageRestricted>
-  <maxRequestsPerHost>32</maxRequestsPerHost>
-  <waitForPodSec>600</waitForPodSec>
-</org.csanchez.jenkins.plugins.kubernetes.KubernetesCloud>
-```
-### 3.1 Steps for updating Jenkins
-* Git commit the change to branch jenkins-add-oepnshift
-* Create pull request 58
-* Log in to the Jenkins admin console. Jenkins -> _jenkins -> Pull Requests(37) -> PR-58; the pipeline zeva/jenkins/Jenkinsfile is triggered
-* Openshift Console
- * build configs jenkins-build-58 and jenkins-slave-main-build-58 are created and triggered
- * jenkins:dev-1.0.0-58 and jenkins-slave-main:build-1.0.0-58 are tagged
- * deployment config jenkins-dev-58 and jenkins-slave-dev-58 are created and triggered
- * route jenkins-dev-58 is created
-* Open a browser and go to the URL specified by route jenkins-dev-58
- * verify Jenkins -> Manage Jenkins -> Configure System; there should be only one cloud, openshift, created
-* Open the Jenkins admin console
- * go to Jenkins -> _jenkins -> Pull Requests(37) -> PR-58 Console Output; it is asking "Should we continue with deployment to PROD?"
- * choose Yes
- * image jenkins / build-1.0.0-58, dev-1.0.0-58 and prod-1.0.0 are the same
- * image jenkins-slave-main / build-1.0.0-58, dev-1.0.0-58 and prod-1.0.0 are the same
- * deployment configs jenkins-prod and jenkins-slave-prod are triggered by image change
-* Open a browser and go to the URL specified by route jenkins-prod
- * verify Jenkins -> Manage Jenkins -> Configure System; there should be only one cloud, openshift, created
-* In the Openshift console, bring down jenkins-slave-dev-58 and jenkins-dev-58
-
-## Tips
-* The project team should be responsible for building the Jenkins slave, e.g. adding npm modules into it, so there is no need to use npmw anymore (see the sketch below)
-* After Jenkins is created successfully, two webhooks should have been created in the zeva repo (if the webhooks show as failed, it is ok, as Jenkins may not be fully up yet)
-* Under the dev namespace, grant admin permission to the service account "tbiwaq-tools/jenkins-prod"; we only allow "tbiwaq-tools/jenkins-prod" to deploy on dev, test and prod, and do NOT allow "tbiwaq-tools/jenkins-dev" to do anything on these three environments
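-
-A hypothetical sketch of such a customized slave image (the base image name and module list are illustrative, not the project's actual Dockerfile):
-```
-# Hypothetical: extend the project's jenkins-slave-main image so agents
-# already carry the node modules the pipeline needs.
-FROM jenkins-slave-main:latest
-USER root
-RUN npm install -g @bcgov/pipeline-cli @bcgov/gh-deploy
-USER 1001
-```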
diff --git a/openshift-v3/templates/README.md b/openshift-v3/templates/README.md
deleted file mode 100644
index c4cdad657..000000000
--- a/openshift-v3/templates/README.md
+++ /dev/null
@@ -1,57 +0,0 @@
-# ZEVA Openshift Setup
-
-## 1. Network Security
-
-* Follow the instructions in openshift/templates/nsp/README.md
-
-## 2. Jenkins setup on tools project
-
-* openshift/jenkins/README.md
-* Install node packages
-.jenkins/.pipeline$ npm install
-* Build the jenkins and jenkins-slave-main images; create a PR such as 161
-.jenkins/.pipeline$ npm run build -- --pr=161 --env=build
-* Deploy jenkins to the tools project
-.jenkins/.pipeline$ npm run deploy -- --pr=161 --env=dev
-.jenkins/.pipeline$ npm run deploy -- --pr=161 --env=prod
-
-Note: for Jenkins, build, dev and prod are actually all in the tools environment
-
-## 3. Pipeline to deploy on dev, test and prod
-
-### 3.1 Preparation for pipeline
-
-* openshift/templates/config/README.md [Before triggering pipeline]
-* openshift/templates/keycloak/README.md
-* openshift/templates/backend/README.md [Before triggering pipeline]
-* openshift/templates/frontend/README.md [Before triggering pipeline]
-* openshift/templates/minio/README.md [Before triggering pipeline]
-* openshift/templates/patroni/README.md [Before triggering pipeline]
-* openshift/templates/rabbitmq/README.md [Before triggering pipeline]
-
-### 3.2 Run pipeline
-
-For example, the latest tracking PR is 199
-
-* .pipeline$ npm run build -- --pr=199 --env=build
-* .pipeline$ npm run deploy -- --pr=199 --env=dev
-* .pipeline$ npm run deploy -- --pr=199 --env=test
-* .pipeline$ npm run deploy -- --pr=199 --env=prod
-
-### 3.3 Post pipeline
-
-* openshift/templates/backend/README.md [After pipeline completes]
-* openshift/templates/frontend/README.md [After pipeline completes]
-
-## 4. Backup container
-
-* openshift/templates/backup-container-2.0.0/openshift/templates/backup/README.md
-
-## 5. Nagios
-
-* openshift/templates/nagios/README.md
-
-## 6. Database migration
-
-* openshift/templates/patroni/README.md [Database Migration from Openshift v3 to Openshift 4]
-
diff --git a/openshift-v3/templates/backend/README.md b/openshift-v3/templates/backend/README.md
deleted file mode 100644
index 303b1798d..000000000
--- a/openshift-v3/templates/backend/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-### Files included
-
- * backend-bc.yaml: the backend build config
- * backend-dc.yaml: the backend deployment config
- * django-secret-template.yaml: creates template.django-secret; it is not in the pipeline and needs to be run independently; it is used by backend-dc.yaml
- * backend-autoscaler.yaml: creates the backend autoscaler; it is not in the pipeline and needs to be run independently
-
-### Prepare for pipeline build and deploy
-
-#### Before triggering pipeline
-
-1. Create the base image used by the backend, registry.access.redhat.com/rhscl/python-36-rhel7:1-63
- * oc tag registry.access.redhat.com/rhscl/python-36-rhel7:1-63 tbiwaq-tools/python:3.6-1-63
-
-2. Create the template secret template.django-secret (a hypothetical sketch of its shape follows this list)
- * oc process -f django-secret-template.yaml | oc create -f - -n [project namespace]
-
-3. Create email service secret for each environment
- * oc process -f email-service-secret.yaml EMAIL_SERVICE_CLIENT_ID= EMAIL_SERVICE_CLIENT_SECRET= CHES_AUTH_URL= CHES_EMAIL_URL= | oc create -f - -n [env namespace]
-
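-For item 2, a hypothetical sketch of the shape of django-secret-template.yaml, using Openshift's generated template parameters (the exact file contents may differ):
-```
-apiVersion: template.openshift.io/v1
-kind: Template
-metadata:
-  name: django-secret
-parameters:
-  - name: DJANGO_SECRET_KEY
-    generate: expression
-    from: "[a-zA-Z0-9]{50}"
-objects:
-  - apiVersion: v1
-    kind: Secret
-    metadata:
-      name: template.django-secret
-    stringData:
-      DJANGO_SECRET_KEY: ${DJANGO_SECRET_KEY}
-```
-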
-#### After pipeline completes
-
-1. After the pipeline completes, create the autoscaler for the backend (the template requires the NAME, SUFFIX, MIN_REPLICAS and MAX_REPLICAS parameters)
- * oc process -f backend-autoscaler.yaml NAME=[name] SUFFIX=[suffix] MIN_REPLICAS=[min] MAX_REPLICAS=[max] | oc create -f - -n [project namespace]
-
diff --git a/openshift-v3/templates/backend/backend-autoscaler.yaml b/openshift-v3/templates/backend/backend-autoscaler.yaml
deleted file mode 100644
index ffed6de34..000000000
--- a/openshift-v3/templates/backend/backend-autoscaler.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: backend-autoscaler
-parameters:
- - name: NAME
- displayName:
- description: the module name entered when running yo bcdk:pipeline, which is zeva
- required: true
- - name: SUFFIX
- displayName:
- description:
- required: true
- - name: MIN_REPLICAS
- displayName:
- description:
- required: true
- - name: MAX_REPLICAS
- displayName:
- description:
- required: true
-objects:
- - apiVersion: autoscaling/v1
- kind: HorizontalPodAutoscaler
- metadata:
- name: ${NAME}-backend${SUFFIX}-autoscaler
- spec:
- scaleTargetRef:
- apiVersion: apps.openshift.io/v1
- kind: DeploymentConfig
- name: ${NAME}-backend${SUFFIX}
- subresource: scale
- minReplicas: ${{MIN_REPLICAS}}
- maxReplicas: ${{MAX_REPLICAS}}
- targetCPUUtilizationPercentage: 80
\ No newline at end of file
diff --git a/openshift-v3/templates/backend/backend-bc.yaml b/openshift-v3/templates/backend/backend-bc.yaml
deleted file mode 100644
index 59787ab18..000000000
--- a/openshift-v3/templates/backend/backend-bc.yaml
+++ /dev/null
@@ -1,95 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: zeva-backend-bc
-parameters:
- - name: NAME
- displayName:
- description: the module name entered when running yo bcdk:pipeline, which is zeva
- required: true
- - name: SUFFIX
- displayName:
- description: sample is -pr-0
- required: true
- - name: VERSION
- displayName:
- description: image tag name for output
- required: true
- - name: GIT_URL
- displayName:
- description: zeva repo
- required: true
- - name: GIT_REF
- displayName:
- description: zeva branch name of the pr
- required: true
-objects:
- - apiVersion: image.openshift.io/v1
- kind: ImageStream
- metadata:
- annotations:
- description: Keeps track of changes in the backend image
- creationTimestamp: null
- name: ${NAME}-python
- spec:
- lookupPolicy:
- local: false
- status:
- dockerImageRepository: ""
- - apiVersion: image.openshift.io/v1
- kind: ImageStream
- metadata:
- annotations:
- description: Keeps track of changes in the backend image
- labels:
- shared: "true"
- creationTimestamp: null
- name: ${NAME}-backend
- spec:
- lookupPolicy:
- local: false
- status:
- dockerImageRepository: ""
- - apiVersion: build.openshift.io/v1
- kind: BuildConfig
- metadata:
- annotations:
- description: Defines how to build the application
- creationTimestamp: null
- name: ${NAME}-backend${SUFFIX}
- spec:
- nodeSelector: null
- output:
- to:
- kind: ImageStreamTag
- name: ${NAME}-backend:${VERSION}
- postCommit: {}
- resources:
- limits:
- cpu: 2000m
- memory: 2G
- requests:
- cpu: 500m
- memory: 200M
- runPolicy: SerialLatestOnly
- source:
- contextDir: backend
- git:
- ref: ${GIT_REF}
- uri: ${GIT_URL}
- type: Git
- strategy:
- sourceStrategy:
- env:
- - name: PIP_INDEX_URL
- from:
- kind: ImageStreamTag
- name: python:3.6-1-63
- type: Source
- triggers:
- - imageChange: {}
- type: ImageChange
- - type: ConfigChange
- status:
- lastVersion: 0
diff --git a/openshift-v3/templates/backend/backend-dc.yaml b/openshift-v3/templates/backend/backend-dc.yaml
deleted file mode 100644
index b6c6b88fe..000000000
--- a/openshift-v3/templates/backend/backend-dc.yaml
+++ /dev/null
@@ -1,426 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: zeva-backend-dc
-parameters:
- - name: NAME
- displayName: null
- description: 'the module name entered when running yo bcdk:pipeline, which is zeva'
- required: true
- - name: SUFFIX
- displayName: null
- description: sample is -dev-97
- required: true
- - name: VERSION
- displayName: null
- description: image tag name for output
- required: true
- - name: ENV_NAME
- value: dev
- displayName: Environment name
- description: 'Environment name, dev, test and prod'
- required: true
- - name: BACKEND_HOST_NAME
- displayName: Host name for route
- description: Host name for route
- required: true
- - name: RABBITMQ_CLUSTER_NAME
- displayName: rabbitmq cluster name
- description: rabbitmq cluster name
- required: true
- - name: CPU_REQUEST
- displayName: Requested CPU
- description: Requested CPU
- required: true
- - name: CPU_LIMIT
- displayName: CPU upper limit
- description: CPU upper limit
- required: true
- - name: MEMORY_REQUEST
- displayName: Requested memory
- description: Requested memory
- required: true
- - name: MEMORY_LIMIT
- displayName: Memory upper limit
- description: Memory upper limit
- required: true
- - name: HEALTH_CHECK_DELAY
- value: '150'
- displayName: Health check initial delay
- description: Initial delay in seconds before the health checks start
- required: true
- - name: REPLICAS
- value: '1'
- required: true
-objects:
- - apiVersion: v1
- kind: Secret
- metadata:
- labels:
- app: ${NAME}${SUFFIX}
- annotations:
- as-copy-of: "template.django-secret"
- name: ${NAME}-django${SUFFIX}
- stringData:
- DJANGO_SECRET_KEY: ${DJANGO_SECRET_KEY}
- - apiVersion: image.openshift.io/v1
- kind: ImageStream
- metadata:
- annotations:
- description: Keeps track of changes in the backend image
- labels:
- shared: "true"
- creationTimestamp: null
- name: '${NAME}-backend'
- spec:
- lookupPolicy:
- local: false
- status:
- dockerImageRepository: ''
- - apiVersion: v1
- kind: Service
- metadata:
- creationTimestamp: null
- name: '${NAME}-backend${SUFFIX}'
- labels:
- name: backend
- app: zeva
- role: backend
- env: '${ENV_NAME}'
- spec:
- ports:
- - name: backend
- port: 8080
- protocol: TCP
- targetPort: 8080
- selector:
- name: '${NAME}-backend${SUFFIX}'
- sessionAffinity: None
- type: ClusterIP
- status:
- loadBalancer: {}
- - apiVersion: route.openshift.io/v1
- kind: Route
- metadata:
- creationTimestamp: null
- annotations:
- haproxy.router.openshift.io/balance: source
- haproxy.router.openshift.io/timeout: 1200s
- labels:
- name: backend
- app: zeva
- role: backend
- env: '${ENV_NAME}'
- name: ${NAME}-backend${SUFFIX}
- spec:
- host: ${BACKEND_HOST_NAME}
- path: /api
- port:
- targetPort: backend
- tls:
- insecureEdgeTerminationPolicy: Redirect
- termination: edge
- to:
- kind: Service
- name: '${NAME}-backend${SUFFIX}'
- weight: 100
- wildcardPolicy: None
-# - apiVersion: route.openshift.io/v1
-# kind: Route
-# metadata:
-# creationTimestamp: null
-# labels:
-# name: backend
-# app: zeva
-# role: backend
-# env: '${ENV_NAME}'
-# name: '${NAME}-backend-health${SUFFIX}'
-# spec:
-# host: ${HOST_NAME}
-# path: /health
-# port:
-# targetPort: backend
-# tls:
-# termination: edge
-# to:
-# kind: Service
-# name: '${NAME}-backend${SUFFIX}'
-# weight: 100
-# wildcardPolicy: None
- - apiVersion: apps.openshift.io/v1
- kind: DeploymentConfig
- metadata:
- annotations:
- description: Defines how to deploy the application server
- creationTimestamp: null
- labels:
- app: zeva
- role: backend
- env: ${ENV_NAME}
- name: ${NAME}-backend${SUFFIX}
- spec:
- replicas: ${{REPLICAS}}
- revisionHistoryLimit: 10
- selector:
- name: ${NAME}-backend${SUFFIX}
- strategy:
- activeDeadlineSeconds: 800
- recreateParams:
- mid:
- execNewPod:
- command:
- - /bin/sh
- - '-c'
- - |-
- sleep 90
- python ./manage.py migrate
- if [ $? -eq 0 ]; then
- python ./manage.py load_ops_data --directory ./api/fixtures/operational
- else
- exit 1
- fi
- containerName: backend
- failurePolicy: Retry
- timeoutSeconds: 600
- resources: {}
- type: Recreate
- template:
- metadata:
- creationTimestamp: null
- labels:
- name: ${NAME}-backend${SUFFIX}
- spec:
- containers:
- - name: backend
- image: null
- imagePullPolicy: IfNotPresent
- env:
- - name: DATABASE_NAME
- value: zeva
- - name: DATABASE_USER
- valueFrom:
- secretKeyRef:
- name: patroni${SUFFIX}
- key: app-db-username
- - name: DATABASE_PASSWORD
- valueFrom:
- secretKeyRef:
- name: patroni${SUFFIX}
- key: app-db-password
- - name: DATABASE_ENGINE
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: database_engine
- - name: DATABASE_SERVICE_NAME
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: database_service_name
- - name: POSTGRESQL_SERVICE_HOST
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: postgresql_service_host
- - name: POSTGRESQL_SERVICE_PORT
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: postgresql_service_port
- - name: KEYCLOAK_CERTS_URL
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: keycloak_certs_url
- - name: KEYCLOAK_REALM
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: keycloak_realm
- - name: KEYCLOAK_REALM_URL
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: keycloak_realm_url
- - name: KEYCLOAK_AUTHORITY
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: keycloak_authority
- - name: KEYCLOAK_ISSUER
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: keycloak_issuer
- - name: KEYCLOAK_AUDIENCE
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: keycloak_audience
- - name: KEYCLOAK_CLIENT_ID
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: keycloak_client_id
- - name: KEYCLOAK_SA_BASEURL
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: keycloak_sa_baseurl
- - name: KEYCLOAK_SA_REALM
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: keycloak_sa_realm
- - name: KEYCLOAK_SA_CLIENT_ID
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: keycloak_sa_client_id
- - name: KEYCLOAK_SA_CLIENT_SECRET
- valueFrom:
- secretKeyRef:
- name: zeva-keycloak
- key: KEYCLOAK_SA_CLIENT_SECRET
- - name: DJANGO_DEBUG
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: django_debug
- - name: DJANGO_SECRET_KEY
- valueFrom:
- secretKeyRef:
- name: ${NAME}-django${SUFFIX}
- key: DJANGO_SECRET_KEY
- - name: RABBITMQ_VHOST
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: rabbitmq_vhost
- - name: RABBITMQ_USER
- valueFrom:
- secretKeyRef:
- name: ${NAME}${SUFFIX}-${RABBITMQ_CLUSTER_NAME}-secret
- key: username
- - name: RABBITMQ_PASSWORD
- valueFrom:
- secretKeyRef:
- name: ${NAME}${SUFFIX}-${RABBITMQ_CLUSTER_NAME}-secret
- key: password
- - name: RABBITMQ_HOST
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: rabbitmq_host
- - name: RABBITMQ_PORT
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: rabbitmq_port
- - name: EMAIL_SENDING_ENABLED
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: email_sending_enabled
- - name: EMAIL_FROM_ADDRESS
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: email_from_address
- - name: APP_CONFIG
- value: /opt/app-root/src/gunicorn.cfg
- - name: ENV_NAME
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: env_name
- - name: MINIO_USE_SSL
- value: 'true'
- - name: MINIO_ACCESS_KEY
- valueFrom:
- secretKeyRef:
- name: ${NAME}-minio-${ENV_NAME}
- key: MINIO_ACCESS_KEY
- - name: MINIO_SECRET_KEY
- valueFrom:
- secretKeyRef:
- name: ${NAME}-minio-${ENV_NAME}
- key: MINIO_SECRET_KEY
- - name: MINIO_ENDPOINT
- valueFrom:
- configMapKeyRef:
- name: ${NAME}-config${SUFFIX}
- key: minio_endpoint
- - name: EMAIL_SERVICE_CLIENT_ID
- valueFrom:
- secretKeyRef:
- name: email-service
- key: EMAIL_SERVICE_CLIENT_ID
- - name: EMAIL_SERVICE_CLIENT_SECRET
- valueFrom:
- secretKeyRef:
- name: email-service
- key: EMAIL_SERVICE_CLIENT_SECRET
- - name: CHES_AUTH_URL
- valueFrom:
- secretKeyRef:
- name: email-service
- key: CHES_AUTH_URL
- - name: CHES_EMAIL_URL
- valueFrom:
- secretKeyRef:
- name: email-service
- key: CHES_EMAIL_URL
- livenessProbe:
- failureThreshold: 30
- tcpSocket:
- port: 8080
- initialDelaySeconds: ${{HEALTH_CHECK_DELAY}}
- periodSeconds: 15
- successThreshold: 1
- timeoutSeconds: 3
- ports:
- - containerPort: 8080
- protocol: TCP
- readinessProbe:
- failureThreshold: 30
- tcpSocket:
- port: 8080
- initialDelaySeconds: ${{HEALTH_CHECK_DELAY}}
- periodSeconds: 15
- successThreshold: 1
- timeoutSeconds: 3
- resources:
- limits:
- cpu: ${CPU_LIMIT}
- memory: ${MEMORY_LIMIT}
- requests:
- cpu: ${CPU_REQUEST}
- memory: ${MEMORY_REQUEST}
- terminationMessagePath: /dev/termination-log
- terminationMessagePolicy: File
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- schedulerName: default-scheduler
- securityContext: {}
- terminationGracePeriodSeconds: 30
- test: false
- triggers:
- - imageChangeParams:
- automatic: true
- containerNames:
- - backend
- from:
- kind: ImageStreamTag
- name: ${NAME}-backend:${VERSION}
- lastTriggeredImage: null
- type: ImageChange
- - type: ConfigChange
- status:
- availableReplicas: 0
- latestVersion: 0
- observedGeneration: 0
- replicas: 0
- unavailableReplicas: 0
- updatedReplicas: 0
diff --git a/openshift-v3/templates/backend/django-secret-template.yaml b/openshift-v3/templates/backend/django-secret-template.yaml
deleted file mode 100644
index 150b9b92b..000000000
--- a/openshift-v3/templates/backend/django-secret-template.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-## The regular expression really should be [a-zA-Z0-9!_=$^()+]{50}
-## but it doesn't work for OpenShift
-apiVersion: template.openshift.io/v1
-kind: Template
-parameters:
-- name: DJANGO_SECRET
- description: "Cookie used for authentication of cluster nodes"
- from: "[a-zA-Z0-9]{50}"
- generate: expression
-objects:
-- apiVersion: v1
- kind: Secret
- metadata:
- annotations: null
- name: template.django-secret
- stringData:
- DJANGO_SECRET_KEY: ${DJANGO_SECRET}
\ No newline at end of file
diff --git a/openshift-v3/templates/backend/email-service-secret.yaml b/openshift-v3/templates/backend/email-service-secret.yaml
deleted file mode 100644
index 2de0bf6c9..000000000
--- a/openshift-v3/templates/backend/email-service-secret.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-parameters:
-- name: EMAIL_SERVICE_CLIENT_ID
- description: the client id for the Zeva project
- required: true
-- name: EMAIL_SERVICE_CLIENT_SECRET
- description: the client secret for the Zeva project
- required: true
-- name: CHES_AUTH_URL
- description: the authentication url to retrieve token
- required: true
-- name: CHES_EMAIL_URL
- description: the email service url
- required: true
-objects:
-- apiVersion: v1
- kind: Secret
- metadata:
- annotations: null
- name: email-service
- stringData:
- EMAIL_SERVICE_CLIENT_ID: ${EMAIL_SERVICE_CLIENT_ID}
- EMAIL_SERVICE_CLIENT_SECRET: ${EMAIL_SERVICE_CLIENT_SECRET}
- CHES_AUTH_URL: ${CHES_AUTH_URL}
- CHES_EMAIL_URL: ${CHES_EMAIL_URL}
\ No newline at end of file
diff --git a/openshift-v3/templates/backup-container-2.0.0/.gitattributes b/openshift-v3/templates/backup-container-2.0.0/.gitattributes
deleted file mode 100644
index a295ec358..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/.gitattributes
+++ /dev/null
@@ -1,12 +0,0 @@
-# Set the default behavior, in case people don't have core.autocrlf set.
-* text=auto
-
-# Declare files that will always have LF line endings on checkout.
-backup.* text eol=lf
-*.sh text eol=lf
-*.md text eol=lf
-*.json text eol=lf
-*.conf text eol=lf
-**/s2i/bin/* text eol=lf
-**/root/**/* text eol=lf
-**/.scripts/* text eol=lf
\ No newline at end of file
diff --git a/openshift-v3/templates/backup-container-2.0.0/.gitignore b/openshift-v3/templates/backup-container-2.0.0/.gitignore
deleted file mode 100644
index 1d1784a16..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/.gitignore
+++ /dev/null
@@ -1,16 +0,0 @@
-# See http://help.github.com/ignore-files/ for more about ignoring files.
-.DS_Store
-
-# Files created by the scripts from: https://github.com/BCDevOps/openshift-project-tools
-*_DeploymentConfig.json
-*_BuildConfig.json
-*.local.*
-*.overrides.*
-
-# Visual Studio Code
-.vscode
-
-# Local config
-.env
-backups
-docker/backup.conf
\ No newline at end of file
diff --git a/openshift-v3/templates/backup-container-2.0.0/CODE_OF_CONDUCT.md b/openshift-v3/templates/backup-container-2.0.0/CODE_OF_CONDUCT.md
deleted file mode 100644
index 078593ff7..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at angelika.ehlers@gov.bc.ca. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
-
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/
diff --git a/openshift-v3/templates/backup-container-2.0.0/CONTRIBUTING.md b/openshift-v3/templates/backup-container-2.0.0/CONTRIBUTING.md
deleted file mode 100644
index 539ed4eed..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/CONTRIBUTING.md
+++ /dev/null
@@ -1,10 +0,0 @@
-
-# How to contribute
-
-Government employees, public and members of the private sector are encouraged to contribute to the repository by **forking and submitting a pull request**.
-
-(If you are new to GitHub, you might start with a [basic tutorial](https://help.github.com/articles/set-up-git) and check out a more detailed guide to [pull requests](https://help.github.com/articles/using-pull-requests/).)
-
-Pull requests will be evaluated by the repository guardians on a schedule and if deemed beneficial will be committed to the master.
-
-All contributors retain the original copyright to their stuff, but by contributing to this project, you grant a world-wide, royalty-free, perpetual, irrevocable, non-exclusive, transferable license to all users **under the terms of the license under which this project is distributed.**
diff --git a/openshift-v3/templates/backup-container-2.0.0/LICENSE b/openshift-v3/templates/backup-container-2.0.0/LICENSE
deleted file mode 100644
index 261eeb9e9..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/openshift-v3/templates/backup-container-2.0.0/README.md b/openshift-v3/templates/backup-container-2.0.0/README.md
deleted file mode 100644
index eba2501ef..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/README.md
+++ /dev/null
@@ -1,368 +0,0 @@
----
-title: Backup Container
-description: A simple containerized backup solution for backing up one or more postgres or mongo databases to a secondary location.
-author: WadeBarnes
-resourceType: Components
-personas:
- - Developer
- - Product Owner
- - Designer
-labels:
- - backup
- - backups
- - postgres
- - mongo
- - database
----
-[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
-
-# Backup Container
-[Backup Container](https://github.com/BCDevOps/backup-container) is a simple containerized backup solution for backing up one or more postgres or mongo databases to a secondary location. _Code and documentation were originally pulled from the [HETS Project](https://github.com/bcgov/hets)_
-
-# Backup Container Options
-You can run the Backup Container for postgres and mongo databases separately or in a mixed environment.
-For a mixed environment:
-1) You MUST use the recommended `backup.conf` configuration.
-2) Within the `backup.conf`, you MUST specify the `DatabaseType` for each listed database.
-3) You will need to create two builds and two deployment configs: one for a postgres backup container and the other for a mongo backup container.
-4) Mount the same `backup.conf` file (ConfigMap) to each deployed container.
-
-## Backups in OpenShift
-This project provides you with a starting point for integrating backups into your OpenShift projects. The scripts and templates provided in the [openshift](./openshift) directory are compatible with the [openshift-developer-tools](https://github.com/BCDevOps/openshift-developer-tools) scripts. They help you create an OpenShift deployment or cronjob called `backup` in your projects that runs backups on databases within the project environment. You only need to integrate the scripts and templates into your project(s); the builds can be done with this repository as the source.
-
-Following are the instructions for running the backups and a restore.
-
-## Storage
-*Before we get too far into the details, we're going to take a moment to discuss the most important part of the whole process - **The Storage**.* The backup container uses two volumes, one for storing the backups and the other for restore/verification testing. The deployment template separates them intentionally.
-
-The following sections on storage discuss the recommendations and limitations of the storage classes created specifically for the BC Government's environment.
-
-
-### Backup Storage Volume
-The recommended storage class for the backup volume is `nfs-backup`. This class of storage **cannot** be auto-provisioned through the use of a deployment template. The `PersistentVolumeClaim` declared in the supplied deployment template for the *backup volume* will purposely fail to properly provision and wire an `nfs-backup` volume if published before you manually provision your `nfs-backup` claim.
-
-When using `nfs-backup` you will need to provision your claims **before** you publish your deployment configuration, through either the [service catalog](https://github.com/BCDevOps/provision-nfs-apb#provision-via-gui-catalog) using the [BC Gov NFS Storage](https://github.com/BCDevOps/provision-nfs-apb/blob/master/docs/usage-gui.md) wizard, or by using the [svcat cli](https://github.com/BCDevOps/provision-nfs-apb#provision-via-svcat-cli).
-
-You'll note the name of the resulting storage claim has a random component to it (example, `bk-devex-von-bc-tob-test-xjrmkhsnshay`). This name needs to be injected into the default value of the `BACKUP_VOLUME_NAME` parameter of the template **before** publishing the deployment configuration in order for the storage to be correctly mounted to the `/backups/` directory of the container.
-
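-As a concrete illustration, publishing the deployment configuration with the claim name injected might look like the following (the template file name `backup-deploy.yaml` is an assumption; substitute your project's template):
-
-```bash
-# Inject the manually provisioned nfs-backup claim name before publishing.
-oc process -f backup-deploy.yaml \
-  -p BACKUP_VOLUME_NAME=bk-devex-von-bc-tob-test-xjrmkhsnshay \
-  | oc apply -f -
-```
-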
-`nfs-backup` storageClass is a lower tier of storage and not considered highly available. `read: don't use this for live application storage`. The storageClass **IS** covered by the default enterprise backup policies, and can be directly referenced for restores using the PVC name when opening a restore ticket with 7700.
-
-`nfs-backup` PVCs **cannot** be used for restore/verification. The permissions on the underlying volume do not allow the PostgreSQL server to host its configuration and data files on a directory backed by this class of storage.
-
-Ensure you review and plan your storage requirements before provisioning.
-
-More information on provisioning `nfs-backup` storage here: [provision-nfs-apb](https://github.com/BCDevOps/provision-nfs-apb)
-
-#### NFS Storage Backup and Retention Policy
-NFS backed storage is covered by the following backup and retention policies:
-- Backup
- - Daily: Incremental
- - Monthly: Full
-- Retention
- - 90 days
-
-### Restore/Verification Storage Volume
-The default storage class for the restore/verification volume is `netapp-file-standard`. The supplied deployment template will auto-provision this volume for you when it is published. Refer to the *Storage Performance* section for performance considerations.
-
-This volume should be large enough to host your largest database. Set the size by updating/overriding the `VERIFICATION_VOLUME_SIZE` value within the template.
-
-### Storage Performance
-The performance of `netapp-block-standard` for restore/verification is far superior to that of `netapp-file-standard`; however, it should only be used in cases where the time it takes to verify a backup begins to encroach on the overall timing and verification cycle. You want the verification(s) to complete before another backup and verification cycle begins, and you want a bit of idle time between the end of one cycle and the beginning of another in case things take a little longer now and again.
-
-*There are currently no performance stats for the `netapp` storage types.*
-
-## Deployment / Configuration
-Together, the scripts and templates provided in the [openshift](./openshift) directory will automatically deploy the `backup` app as described below. The [backup-deploy.overrides.sh](./openshift/backup-deploy.overrides.sh) script generates the deployment configuration necessary for the [backup.conf](config/backup.conf) file to be mounted as a ConfigMap by the `backup` container.
-
-The following environment variables are defaults used by the `backup` app.
-
-**NOTE**: These environment variables MUST MATCH those used by the postgresql container(s) you are planning to back up.
-
-| Name | Default (if not set) | Purpose |
-| ---- | ------- | ------- |
-| BACKUP_STRATEGY | rolling | To control the backup strategy used for backups. This is explained more below. |
-| BACKUP_DIR | /backups/ | The directory under which backups will be stored. The deployment configuration mounts the persistent volume claim to this location when first deployed. |
-| NUM_BACKUPS | 31 | Used for backward compatibility only, this value is used with the daily backup strategy to set the number of backups to retain before pruning. |
-| DAILY_BACKUPS | 6 | When using the rolling backup strategy this value is used to determine the number of daily (Mon-Sat) backups to retain before pruning. |
-| WEEKLY_BACKUPS | 4 | When using the rolling backup strategy this value is used to determine the number of weekly (Sun) backups to retain before pruning. |
-| MONTHLY_BACKUPS | 1 | When using the rolling backup strategy this value is used to determine the number of monthly (last day of the month) backups to retain before pruning. |
-| BACKUP_PERIOD | 1d | Only used for Legacy Mode. Ignored when running in Cron Mode. The schedule on which to run the backups. The value is used by a sleep command and can be defined in d, h, m, or s. |
-| DATABASE_SERVICE_NAME | postgresql | Used for backward compatibility only. The name of the service/host for the *default* database target. |
-| DATABASE_USER_KEY_NAME | database-user | The database user key name stored in database deployment resources specified by DATABASE_DEPLOYMENT_NAME. |
-| DATABASE_PASSWORD_KEY_NAME | database-password | The database password key name stored in database deployment resources specified by DATABASE_DEPLOYMENT_NAME. |
-| DATABASE_NAME | my_postgres_db | Used for backward compatibility only. The name of the *default* database target; the name of the database you want to backup. |
-| DATABASE_USER | *wired to a secret* | The username for the database(s) hosted by the database server. The deployment configuration makes the assumption you have your database credentials stored in secrets (which you should), and the key for the username is `database-user`. The name of the secret must be provided as the `DATABASE_DEPLOYMENT_NAME` parameter to the deployment configuration template. |
-| DATABASE_PASSWORD | *wired to a secret* | The password for the database(s) hosted by the database server. The deployment configuration makes the assumption you have your database credentials stored in secrets (which you should), and the key for the password is `database-password`. The name of the secret must be provided as the `DATABASE_DEPLOYMENT_NAME` parameter to the deployment configuration template. |
-| FTP_URL | | The FTP server URL. If not specified, the FTP backup feature is disabled. The default value in the deployment configuration is an empty value - not specified. |
-| FTP_USER | *wired to a secret* | The username for the FTP server. The deployment configuration creates a secret with the name specified in the FTP_SECRET_KEY parameter (default: `ftp-secret`). The key for the username is `ftp-user` and the value is an empty value by default. |
-| FTP_PASSWORD | *wired to a secret* | The password for the FTP server. The deployment configuration creates a secret with the name specified in the FTP_SECRET_KEY parameter (default: `ftp-secret`). The key for the password is `ftp-password` and the value is an empty value by default. |
-| WEBHOOK_URL | | The URL of the webhook endpoint to use for notifications. If not specified, the webhook integration feature is disabled. The default value in the deployment configuration is an empty value - not specified. |
-| ENVIRONMENT_FRIENDLY_NAME | | A friendly (human readable) name of the environment. This variable is used by the webhook integration to identify the environment from which the backup notifications originate. The default value in the deployment configuration is an empty value - not specified. |
-| ENVIRONMENT_NAME | | A name or ID of the environment. This variable is used by the webhook integration to identify the environment from which the backup notifications originate. The default value in the deployment configuration is an empty value - not specified. |
-
-### backup.conf
-
-Using this default configuration you can easily back up a single postgres database; however, we recommend you extend the configuration and use the `backup.conf` file to list a number of databases for backup and even set a cron schedule for the backups.
-
-When using the `backup.conf` file the following environment variables are ignored, since you list all of your `host`/`database` pairs in the file: `DATABASE_SERVICE_NAME` and `DATABASE_NAME`. To provide the credentials needed for the listed databases you extend the deployment configuration to include `hostname_USER` and `hostname_PASSWORD` credential pairs which are wired to the appropriate secrets (where hostname matches the hostname/servicename, in all caps and underscores, of the database). For example, if you are backing up a database named `wallet-db/my_wallet`, you would have to extend the deployment configuration to include a `WALLET_DB_USER` and `WALLET_DB_PASSWORD` credential pair, wired to the appropriate secrets, to access the database(s) on the `wallet-db` server. The naming convention is sketched below.
-
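-The hostname-to-variable naming convention can be sketched as follows (an illustrative snippet, not part of the backup scripts themselves):
-
-```bash
-# "wallet-db" -> WALLET_DB_USER / WALLET_DB_PASSWORD
-hostname="wallet-db"
-prefix=$(echo "${hostname}" | tr 'a-z-' 'A-Z_')
-echo "${prefix}_USER ${prefix}_PASSWORD"
-```
-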
-### Cron Mode
-
-The `backup` container supports running the backups on a cron schedule. The schedule is specified in the `backup.conf` file. Refer to the [backup.conf](./config/backup.conf) file for additional details and examples.
-
-### Cronjob Deployment / Configuration / Constraints
-
-*This section describes the configuration of an OpenShift CronJob; this is different from the Cron Mode supported by the container when deployed in "long running" mode.*
-
-The cronjob object can be deployed in the same manner as the application, and will also have a dependency on the image built by the build config. The main constraint for the cronjob objects is that they require a configmap in place of environment variables and do not support the `backup.conf` for multiple database backups in the same job. In order to back up multiple databases, create multiple cronjob objects with their associated configmaps and secrets, as sketched below.
-
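-For example, each database then gets its own cronjob instantiation, roughly as follows (a sketch; the parameter names mirror the variables below, but check the template for its exact interface):
-
-```bash
-oc process -f backup-cronjob.yaml \
-  -p DATABASE_SERVICE_NAME=wallet-db \
-  -p POSTGRESQL_DATABASE=my_wallet \
-  -p SCHEDULE='0 1 * * *' \
-  | oc apply -f -
-```
-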
-The following variables are supported in the first iteration of the backup cronjob:
-
-| Name | Default (if not set) | Purpose |
-| ---- | -------------------- | ------- |
-| BACKUP_STRATEGY | daily | To control the backup strategy used for backups. This is explained more below. |
-| BACKUP_DIR | /backups/ | The directory under which backups will be stored. The deployment configuration mounts the persistent volume claim to this location when first deployed. |
-| SCHEDULE | 0 1 * * * | Cron Schedule to Execute the Job (using local cluster system TZ). |
-| NUM_BACKUPS | 31 | For backward compatibility this value is used with the daily backup strategy to set the number of backups to retain before pruning. |
-| DAILY_BACKUPS | 6 | When using the rolling backup strategy this value is used to determine the number of daily (Mon-Sat) backups to retain before pruning. |
-| WEEKLY_BACKUPS | 4 | When using the rolling backup strategy this value is used to determine the number of weekly (Sun) backups to retain before pruning. |
-| MONTHLY_BACKUPS | 1 | When using the rolling backup strategy this value is used to determine the number of monthly (last day of the month) backups to retain before pruning. |
-| DATABASE_SERVICE_NAME | postgresql | The name of the service/host for the *default* database target. |
-| DATABASE_USER_KEY_NAME | database-user | The database user key name stored in database deployment resources specified by DATABASE_DEPLOYMENT_NAME. |
-| DATABASE_PASSWORD_KEY_NAME | database-password | The database password key name stored in database deployment resources specified by DATABASE_DEPLOYMENT_NAME. |
-| POSTGRESQL_DATABASE | my_postgres_db | The name of the *default* database target; the name of the database you want to backup. |
-| POSTGRESQL_USER | *wired to a secret* | The username for the database(s) hosted by the `postgresql` Postgres server. The deployment configuration makes the assumption you have your database credentials stored in secrets (which you should), and the key for the username is `database-user`. The name of the secret must be provided as the `DATABASE_DEPLOYMENT_NAME` parameter to the deployment configuration template. |
-| POSTGRESQL_PASSWORD | *wired to a secret* | The password for the database(s) hosted by the `postgresql` Postgres server. The deployment configuration makes the assumption you have your database credentials stored in secrets (which you should), and the key for the password is `database-password`. The name of the secret must be provided as the `DATABASE_DEPLOYMENT_NAME` parameter to the deployment configuration template. |
-
-The following variables are NOT supported:
-
-| Name | Default (if not set) | Purpose |
-| ---- | -------------------- | ------- |
-| BACKUP_PERIOD | 1d | The schedule on which to run the backups. The value is replaced by the cron schedule variable (SCHEDULE) |
-
-The scheduled job does not yet support the FTP environment variables.
-
-| Name |
-| ---- |
-| FTP_URL |
-| FTP_USER |
-| FTP_PASSWORD |
-
-### Resources
-The backup-container is assigned the `Best-effort` resource type (request and limit set to zero), which allows its resource usage to scale up and down, without an explicit limit, as resources on the node allow. It benefits from large bursts of resources for short periods of time to get things done more quickly. After running the backup-container for some time, you can set the request and limit according to the average resource consumption.
-
-## Multiple Databases
-
-When backing up multiple databases, the retention settings apply to each database individually. For instance, if you use the `daily` strategy and set the retention number(s) to 5, you will retain 5 copies of each database, so plan your backup storage accordingly.
-
-An example of the backup container in action can be found here: [example log output](./docs/ExampleLog.md)
-
-## Backup Strategies
-
-The `backup` app supports two backup strategies, each explained below. Regardless of the strategy, backups are identified using a core name derived from the `host/database` specification and a timestamp. All backups are compressed using gzip.
-
-### Daily
-
-The daily backup strategy is very simple. Backups are created in dated folders under the top level `/backups/` folder. When the maximum number of backups (`NUM_BACKUPS`) is exceeded, the oldest ones are pruned from disk.
-
-For example (faked):
-```
-================================================================================================================================
-Current Backups:
---------------------------------------------------------------------------------------------------------------------------------
-1.0K 2018-10-03 22:16 ./backups/2018-10-03/postgresql-TheOrgBook_Database_2018-10-03_22-16-11.sql.gz
-1.0K 2018-10-03 22:16 ./backups/2018-10-03/postgresql-TheOrgBook_Database_2018-10-03_22-16-28.sql.gz
-1.0K 2018-10-03 22:16 ./backups/2018-10-03/postgresql-TheOrgBook_Database_2018-10-03_22-16-46.sql.gz
-1.0K 2018-10-03 22:16 ./backups/2018-10-03/wallet-db-tob_holder_2018-10-03_22-16-13.sql.gz
-1.0K 2018-10-03 22:16 ./backups/2018-10-03/wallet-db-tob_holder_2018-10-03_22-16-31.sql.gz
-1.0K 2018-10-03 22:16 ./backups/2018-10-03/wallet-db-tob_holder_2018-10-03_22-16-48.sql.gz
-1.0K 2018-10-03 22:16 ./backups/2018-10-03/wallet-db-tob_verifier_2018-10-03_22-16-08.sql.gz
-1.0K 2018-10-03 22:16 ./backups/2018-10-03/wallet-db-tob_verifier_2018-10-03_22-16-25.sql.gz
-1.0K 2018-10-03 22:16 ./backups/2018-10-03/wallet-db-tob_verifier_2018-10-03_22-16-43.sql.gz
-13K 2018-10-03 22:16 ./backups/2018-10-03
-...
-61K 2018-10-04 10:43 ./backups/
-================================================================================================================================
-```
-
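-The pruning behaviour amounts to something like the following (an illustrative sketch only; the actual backup.sh prunes per dated folder):
-
-```bash
-# Keep the NUM_BACKUPS newest dumps for a database; delete the rest.
-NUM_BACKUPS=${NUM_BACKUPS:-31}
-ls -1t /backups/*/*.sql.gz 2>/dev/null \
-  | tail -n +$((NUM_BACKUPS + 1)) \
-  | xargs -r rm -f
-```
-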
-### Rolling
-
-The rolling backup strategy provides a bit more flexibility. It allows you to keep a number of recent `daily` backups, a number of `weekly` backups, and a number of `monthly` backups.
-
-- Daily backups are any backups done Monday through Saturday.
-- Weekly backups are any backups done at the end of the week, which we're calling Sunday.
-- Monthly backups are any backups done on the last day of a month.
-
-There are retention settings you can set for each. The defaults provide you with a week's worth of `daily` backups, a month's worth of `weekly` backups, and a single backup for the previous month.
-
-Although the example does not show any `weekly` or `monthly` backups, you can see from the example that the folders are further broken down into the backup type.
-
-For example (faked):
-```
-================================================================================================================================
-Current Backups:
---------------------------------------------------------------------------------------------------------------------------------
-0 2018-10-03 22:16 ./backups/daily/2018-10-03
-1.0K 2018-10-04 09:29 ./backups/daily/2018-10-04/postgresql-TheOrgBook_Database_2018-10-04_09-29-52.sql.gz
-1.0K 2018-10-04 10:37 ./backups/daily/2018-10-04/postgresql-TheOrgBook_Database_2018-10-04_10-37-15.sql.gz
-1.0K 2018-10-04 09:29 ./backups/daily/2018-10-04/wallet-db-tob_holder_2018-10-04_09-29-55.sql.gz
-1.0K 2018-10-04 10:37 ./backups/daily/2018-10-04/wallet-db-tob_holder_2018-10-04_10-37-18.sql.gz
-1.0K 2018-10-04 09:29 ./backups/daily/2018-10-04/wallet-db-tob_verifier_2018-10-04_09-29-49.sql.gz
-1.0K 2018-10-04 10:37 ./backups/daily/2018-10-04/wallet-db-tob_verifier_2018-10-04_10-37-12.sql.gz
-22K 2018-10-04 10:43 ./backups/daily/2018-10-04
-22K 2018-10-04 10:43 ./backups/daily
-4.0K 2018-10-03 22:16 ./backups/monthly/2018-10-03
-4.0K 2018-10-03 22:16 ./backups/monthly
-4.0K 2018-10-03 22:16 ./backups/weekly/2018-10-03
-4.0K 2018-10-03 22:16 ./backups/weekly
-61K 2018-10-04 10:43 ./backups/
-================================================================================================================================
-```
-
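-The date-based classification can be sketched like this (assumes GNU date, as available in the container images):
-
-```bash
-# Decide which rolling folder a backup taken today belongs in.
-if [ "$(date -d tomorrow +%d)" = "01" ]; then
-  folder=monthly   # last day of the month
-elif [ "$(date +%u)" = "7" ]; then
-  folder=weekly    # Sunday
-else
-  folder=daily     # Monday through Saturday
-fi
-echo "/backups/${folder}/$(date +%Y-%m-%d)"
-```
-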
-## Using the Backup Script
-
-The [backup script](./docker/backup.sh) has a few utility features built into it. For a full list of features and documentation run `backup.sh -h`.
-
-Features include:
-
-- The ability to list the existing backups, `backup.sh -l`
-- Listing the current configuration, `backup.sh -c`
-- Running a single backup cycle, `backup.sh -1`
-- Restoring a database from backup, `backup.sh -r <database_spec> [-f <backup_file>]`
- - Restore mode will allow you to restore a database to a different location (host, and/or database name) provided it can contact the host and you can provide the appropriate credentials.
-- Verifying backups, `backup.sh [-s] -v <database_spec> [-f <backup_file>]`
- - Verify mode will restore a backup to the local server to ensure it can be restored without error. Once restored, a table query is performed to ensure there was at least one table restored and queries against the database succeed without error. All database files and configuration are destroyed following the tests.
-
-## Using Backup Verification
-
-The [backup script](./docker/backup.sh) supports running manual or scheduled verifications on your backups; `backup.sh [-s] -v <database_spec> [-f <backup_file>]`. Refer to the script documentation `backup.sh -h`, and the configuration documentation, [backup.conf](config/backup.conf), for additional details on how to use this feature.
-
-## Using the FTP backup
-
-- The FTP backup feature is enabled by specifying the FTP server URL `FTP_URL` (see the example after this list).
-- The FTP server must support FTPS.
-- A path can be added to the URL. For example, the URL can be `ftp://ftp.gov.bc.ca/schoolbus-db-backup/`. Note that when adding a path, the URL must end with `/`, as in the example.
-- The username and password must be populated in the secret key. Refer to the deployment configuration section.
-- There is a known issue for FTPS with Windows 2012 FTP. http://redoubtsolutions.com/fix-the-supplied-message-is-incomplete-error-when-you-use-an-ftps-client-to-upload-a-file-in-windows/
-
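-Enabling the feature on a long-running deployment can be as simple as the following (the deployment config name `backup` is a placeholder; the FTP user and password secret must also be populated as described above):
-
-```bash
-oc set env dc/backup FTP_URL=ftp://ftp.gov.bc.ca/schoolbus-db-backup/
-```
-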
-## Using the Webhook Integration
-
-The Webhook integration feature is enabled by specifying the webhook URL, `WEBHOOK_URL`, in your configuration. It's recommended that you also provide values for `ENVIRONMENT_FRIENDLY_NAME` and `ENVIRONMENT_NAME`, so you can better identify the environment from which the messages originate and do things like produce links to the environment.
-
-The Webhook integration feature was built with Rocket.Chat in mind and an integration script for Rocket.Chat can be found in [rocket.chat.integration.js](./scripts/rocket.chat.integration.js). This script was developed to support the BC OpenShift environment and will format the notifications from the backup script into Rocket.Chat messages (examples below). If you provide values for the environment name (`ENVIRONMENT_FRIENDLY_NAME` and `ENVIRONMENT_NAME`) hyperlinks will be added to the messages to link you to the project console.
-
-Sample Message:
-
-![Sample Message](./docs/SampleRocketChatMessage.png)
-
-Sample Error Message:
-
-![Sample Error Message](./docs/SampleRocketChatErrorMessage.png)
-
-For information on how to set up a webhook in Rocket.Chat refer to [Incoming WebHook Scripting](https://rocket.chat/docs/administrator-guides/integrations/). The **Webhook URL** created during this process is the URL you use for `WEBHOOK_URL` to enable the Webhook integration feature.
-
-## Database Plugin Support
-
-The backup container uses a plugin architecture to perform the database specific operations needed to support various database types.
-
-The plugins are loaded dynamically based on the container type. By default the `backup.null.plugin` will be loaded when the container type is not recognized.
-
-To add support for a new database type:
-1) Update the `getContainerType` function in [backup.container.utils](./docker/backup.container.utils) to detect the new type of database.
-2) Using the existing plugins as reference, implement the database specific scripts for the new database type.
-3) Using the existing docker files as reference, create a new one to build the new container type.
-4) Update the build and deployment templates and their documentation as needed.
-5) Update the project documentation as needed.
-6) Test, test, test.
-7) Submit a PR.
-
-Plugin Examples:
-- [backup.postgres.plugin](./docker/backup.postgres.plugin)
- - Postgres backup implementation.
-
-- [backup.mongo.plugin](./docker/backup.mongo.plugin)
- - Mongo backup implementation.
-
-- [backup.null.plugin](./docker/backup.null.plugin)
- - Sample/Template backup implementation that simply outputs log messages for the various operations.
-
-## Backup
-
-*The following sections describe (some) postgres-specific implementation details; however, the steps are generally the same between database implementations.*
-
-The purpose of the backup app is to do automatic backups. Deploy the Backup app to do daily backups. Viewing the Logs for the Backup App will show a record of backups that have been completed.
-
-The Backup app performs the following sequence of operations:
-
-1. Create a directory that will be used to store the backup.
-2. Use the `pg_dump` and `gzip` commands to make a backup.
-3. Cull backups in excess of $NUM_BACKUPS (default 31, configured in the deployment script)
-4. Wait/Sleep for a period of time and repeat (condensed in the sketch below)
-
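-A single cycle boils down to roughly the following (a sketch with placeholder variables; the real backup.sh adds logging, verification, pruning and error handling):
-
-```bash
-target_dir="${BACKUP_DIR:-/backups/}$(date +%Y-%m-%d)"
-mkdir -p "${target_dir}"
-PGPASSWORD="${DATABASE_PASSWORD}" pg_dump -h "${DATABASE_SERVICE_NAME}" \
-  -U "${DATABASE_USER}" "${DATABASE_NAME}" \
-  | gzip > "${target_dir}/${DATABASE_SERVICE_NAME}-${DATABASE_NAME}_$(date +%Y-%m-%d_%H-%M-%S).sql.gz"
-# ...cull old backups down to $NUM_BACKUPS, then:
-sleep "${BACKUP_PERIOD:-1d}"
-```
-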
-Note that with the pod deployment, we support cron schedule(s) or the legacy mode (which uses a simple "sleep") to run the backup periodically. With the OpenShift Scheduled Job deployment, use the backup-cronjob.yaml template and set the schedule via the OpenShift cronjob object SCHEDULE template parameter.
-
-A separate pod is used vs. having the backups run from the Postgres Pod for fault tolerance - to keep the backups separate from the database storage. We don't want to, for example, lose the storage of the database, or have the database and backups storage fill up, and lose both the database and the backups.
-
-### Immediate Backup
-
-#### Execute a single backup cycle with the pod deployment
-
-- Check the logs of the Backup pod to make sure a backup isn't running right now (pretty unlikely...)
-- Open a terminal window to the pod
-- Run `backup.sh -1`
- - This will run a single backup cycle and exit.
-
-#### Execute an on demand backup using the scheduled job
-
-- Run the following: `oc create job ${SOMEJOBNAME} --from=cronjob/${BACKUP_CRONJOB_NAME}`
- - example: `oc create job my-backup-1 --from=cronjob/backup-postgresql`
- - this will run a single backup job and exit.
- - note: the jobs created in this manner are NOT cleaned up by the scheduler like the automated jobs are.
-
-### Restore
-
-The `backup.sh` script's restore mode makes it very simple to restore the most recent backup of a particular database. It's as simple as running the following command, for example (run `backup.sh -h` for full details on additional options):
-
- backup.sh -r postgresql/TheOrgBook_Database
-
-Following are more detailed steps to perform a restore of a backup.
-
-1. Log into the OpenShift Console and log into OpenShift on the command shell window.
- 1. The instructions here use a mix of the console and command line, but all could be done from a command shell using "oc" commands.
-1. Scale to 0 all Apps that use the database connection.
- 1. This is necessary as the Apps will need to restart to pull data from the restored backup.
- 1. It is recommended that you also scale down to 0 your client application so that users know the application is unavailable while the database restore is underway.
- 1. A nice addition to this would be a user-friendly "This application is offline" message - not yet implemented.
-1. Restart the database pod as a quick way of closing any other database connections from users using port forwarding or who have rsh'd in to connect directly to the database.
-1. Open an rsh into the backup pod:
- 1. Open a command prompt connection to OpenShift using `oc login` with parameters appropriate for your OpenShift host.
- 1. Change to the OpenShift project containing the Backup App `oc project <project_name>`
- 1. List pods using `oc get pods`
- 1. Open a remote shell connection to the **backup** pod. `oc rsh <backup_pod_name>`
-1. In the rsh run the backup script in restore mode, `./backup.sh -r <database_spec>`, to restore the desired backup file. For full information on how to use restore mode, refer to the script documentation, `./backup.sh -h`. Have the Admin password for the database handy; the script will ask for it during the restore process.
- 1. The restore script will automatically grant the database user access to the restored database. If there are other users needing access to the database, such as the DBA group, you will need to additionally run the following commands on the database pod itself using `psql`:
- 1. Get a list of the users by running the command `\du`
- 1. For each user that is not "postgres" and $POSTGRESQL_USER, execute the command `GRANT SELECT ON ALL TABLES IN SCHEMA public TO "<username>";`
- 1. If users have been set up with other grants, set them up as well.
-1. Verify that the database restore worked
- 1. On the database pod, query a table - e.g. the USER table: `SELECT * FROM "SBI_USER";` - you can look at other tables if you want.
- 1. Verify the expected data is shown.
-1. Exit remote shells back to your local command line
-1. From the OpenShift Console restart the app:
- 1. Scale up any pods you scaled down and wait for them to finish starting up. View the logs to verify there were no startup issues.
-1. Verify full application functionality.
-
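-Condensed, the command-line portion of a restore looks roughly like this (all names are placeholders for your own project):
-
-```bash
-oc project my-project
-oc scale dc/my-app --replicas=0                 # stop apps using the database
-oc delete pod my-database-0                     # restart the db pod to close connections
-oc rsh "$(oc get pods -l name=backup -o name)"  # shell into the backup pod
-# inside the backup pod:
-./backup.sh -r postgresql/my_database           # then scale the apps back up
-```
-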
-Done!
-
-## Tips and Tricks
-
-Please refer to the [Tips and Tricks](./docs/TipsAndTricks.md) document for solutions to known issues.
-
-## Getting Help or Reporting an Issue
-To report bugs/issues/feature requests, please file an [issue](../../issues).
-
-## How to Contribute
-If you would like to contribute, please see our [CONTRIBUTING](./CONTRIBUTING.md) guidelines.
-
-Please note that this project is released with a [Contributor Code of Conduct](./CODE_OF_CONDUCT.md).
-By participating in this project you agree to abide by its terms.
diff --git a/openshift-v3/templates/backup-container-2.0.0/config/backup.conf b/openshift-v3/templates/backup-container-2.0.0/config/backup.conf
deleted file mode 100644
index 2ad3354df..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/config/backup.conf
+++ /dev/null
@@ -1,52 +0,0 @@
-# ============================================================
-# Databases:
-# ------------------------------------------------------------
-# List the databases you want backed up here.
-# Databases will be backed up in the order they are listed.
-#
-# The entries must be in one of the following forms:
-#   - <Hostname/>/<DatabaseName/>
-#   - <Hostname/>:<Port/>/<DatabaseName/>
-#   - <DatabaseType>=<Hostname/>/<DatabaseName/>
-#   - <DatabaseType>=<Hostname/>:<Port/>/<DatabaseName/>
-# <DatabaseType> can be postgres or mongo
-# <DatabaseType> MUST be specified when you are sharing a
-# single backup.conf file between postgres and mongo
-# backup containers. If you do not specify <DatabaseType>
-# the listed databases are assumed to be valid for the
-# backup container in which the configuration is mounted.
-#
-# Examples:
-# - postgres=postgresql/my_database
-# - postgres=postgresql:5432/my_database
-# - mongo=mongodb/my_database
-# - mongo=mongodb:27017/my_database
-# -----------------------------------------------------------
-# Cron Scheduling:
-# -----------------------------------------------------------
-# List your backup and verification schedule(s) here as well.
-# The schedule(s) must be listed as cron tabs that
-# execute the script in 'scheduled' mode:
-# - ./backup.sh -s
-#
-# Examples (assuming system's TZ is set to PST):
-# - 0 1 * * * default ./backup.sh -s
-# - Run a backup at 1am Pacific every day.
-#
-# - 0 4 * * * default ./backup.sh -s -v all
-# - Verify the most recent backups for all databases
-# at 4am Pacific every day.
-# -----------------------------------------------------------
-# Full Example:
-# -----------------------------------------------------------
-# postgres=postgresql:5432/TheOrgBook_Database
-# mongo=mender-mongodb:27017/useradm
-# postgres=wallet-db/tob_issuer
-#
-# 0 1 * * * default ./backup.sh -s
-# 0 4 * * * default ./backup.sh -s -v all
-# ============================================================
-patroni-master-prod:5432/zeva
-0 21 * * * default ./backup.sh -s
-0 22 * * * default ./backup.sh -s -v all
-0 20 * * * default find /backups/minio-backup/* -type d -ctime +7 | xargs rm -rf;mkdir -p /backups/minio-backup/$(date +%Y%m%d);cp -rn /minio-data/* /backups/minio-backup/$(date +%Y%m%d)
\ No newline at end of file
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/Dockerfile b/openshift-v3/templates/backup-container-2.0.0/docker/Dockerfile
deleted file mode 100644
index 4cc88a758..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/Dockerfile
+++ /dev/null
@@ -1,42 +0,0 @@
-# This image provides a postgres installation from which to run backups
-FROM registry.access.redhat.com/rhscl/postgresql-10-rhel7
-
-# Change timezone to PST for convenience
-ENV TZ=PST8PDT
-
-# Set the workdir to be root
-WORKDIR /
-
-# Load the backup scripts into the container (must be executable).
-COPY backup.* /
-
-COPY webhook-template.json /
-
-# ========================================================================================================
-# Install go-crond (from https://github.com/BCDevOps/go-crond)
-# - Adds some additional logging enhancements on top of the upstream project;
-# https://github.com/webdevops/go-crond
-#
-# CRON Jobs in OpenShift:
-# - https://blog.danman.eu/cron-jobs-in-openshift/
-# --------------------------------------------------------------------------------------------------------
-ARG SOURCE_REPO=BCDevOps
-ARG GOCROND_VERSION=0.6.3
-ADD https://github.com/$SOURCE_REPO/go-crond/releases/download/$GOCROND_VERSION/go-crond-64-linux /usr/bin/go-crond
-
-USER root
-
-RUN chmod ug+x /usr/bin/go-crond
-# ========================================================================================================
-
-# ========================================================================================================
-# Perform operations that require root privileges here ...
-# --------------------------------------------------------------------------------------------------------
-RUN echo $TZ > /etc/timezone
-# ========================================================================================================
-
-# Important - Reset to the base image's user account.
-USER 26
-
-# Set the default CMD.
-CMD sh /backup.sh
\ No newline at end of file
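
For context, the image can be exercised locally; a hypothetical build-and-run sketch (the image name and credentials are illustrative, and the -1 "run once" flag is documented in backup.usage at the end of this patch):

    docker build -t backup-postgres -f Dockerfile .
    docker run --rm \
      -e DATABASE_SERVICE_NAME=postgresql \
      -e POSTGRESQL_DATABASE=zeva \
      -e DATABASE_USER=dbuser \
      -e DATABASE_PASSWORD=dbpass \
      backup-postgres ./backup.sh -1
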
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/Dockerfile_Mongo b/openshift-v3/templates/backup-container-2.0.0/docker/Dockerfile_Mongo
deleted file mode 100644
index 4187c170f..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/Dockerfile_Mongo
+++ /dev/null
@@ -1,42 +0,0 @@
-# This image provides a mongo installation from which to run backups
-FROM registry.access.redhat.com/rhscl/mongodb-36-rhel7
-
-# Change timezone to PST for convenience
-ENV TZ=PST8PDT
-
-# Set the workdir to be root
-WORKDIR /
-
-# Load the backup scripts into the container (must be executable).
-COPY backup.* /
-
-COPY webhook-template.json /
-
-# ========================================================================================================
-# Install go-crond (from https://github.com/BCDevOps/go-crond)
-# - Adds some additional logging enhancements on top of the upstream project;
-# https://github.com/webdevops/go-crond
-#
-# CRON Jobs in OpenShift:
-# - https://blog.danman.eu/cron-jobs-in-openshift/
-# --------------------------------------------------------------------------------------------------------
-ARG SOURCE_REPO=BCDevOps
-ARG GOCROND_VERSION=0.6.3
-ADD https://github.com/$SOURCE_REPO/go-crond/releases/download/$GOCROND_VERSION/go-crond-64-linux /usr/bin/go-crond
-
-USER root
-
-RUN chmod ug+x /usr/bin/go-crond
-# ========================================================================================================
-
-# ========================================================================================================
-# Perform operations that require root privileges here ...
-# --------------------------------------------------------------------------------------------------------
-RUN echo $TZ > /etc/timezone
-# ========================================================================================================
-
-# Important - Reset to the base image's user account.
-USER 26
-
-# Set the default CMD.
-CMD sh /backup.sh
\ No newline at end of file
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.config.utils b/openshift-v3/templates/backup-container-2.0.0/docker/backup.config.utils
deleted file mode 100644
index b933846b9..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.config.utils
+++ /dev/null
@@ -1,485 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# Configuration Utility Functions:
-# -----------------------------------------------------------------------------------------------------------------
-function getDatabaseName(){
- (
- _databaseSpec=${1}
- _databaseName=$(echo ${_databaseSpec} | sed -n 's~^.*/\(.*$\)~\1~p')
- echo "${_databaseName}"
- )
-}
-
-function getDatabaseType(){
- (
- _databaseSpec=${1}
- _databaseType=$(echo ${_databaseSpec} | sed -n 's~^\(.*\)=.*$~\1~p' | tr '[:upper:]' '[:lower:]')
- echo "${_databaseType}"
- )
-}
-
-function getPort(){
- (
- local OPTIND
- local localhost
- unset localhost
- while getopts :l FLAG; do
- case $FLAG in
- l ) localhost=1 ;;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- if [ -z "${localhost}" ]; then
- portsed="s~^.*:\([[:digit:]]\+\)/.*$~\1~p"
- _port=$(echo ${_databaseSpec} | sed -n "${portsed}")
- fi
-
- echo "${_port}"
- )
-}
-
-function getHostname(){
- (
- local OPTIND
- local localhost
- unset localhost
- while getopts :l FLAG; do
- case $FLAG in
- l ) localhost=1 ;;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- if [ -z "${localhost}" ]; then
- _hostname=$(echo ${_databaseSpec} | sed 's~^.\+[=]~~;s~[:/].*~~')
- else
- _hostname="127.0.0.1"
- fi
-
- echo "${_hostname}"
- )
-}
-
-function getHostPrefix(){
- (
- _hostname=${1}
- _hostPrefix=$(echo ${_hostname} | tr '[:lower:]' '[:upper:]' | sed "s~-~_~g")
- echo "${_hostPrefix}"
- )
-}
-
-function getHostUserParam(){
- (
- _hostname=${1}
- _hostUser=$(getHostPrefix ${_hostname})_USER
- echo "${_hostUser}"
- )
-}
-
-function getHostPasswordParam(){
- (
- _hostname=${1}
- _hostPassword=$(getHostPrefix ${_hostname})_PASSWORD
- echo "${_hostPassword}"
- )
-}
-
-function readConf(){
- (
- local OPTIND
- local readCron
- local quiet
- local all
- unset readCron
- unset quiet
- while getopts cqa FLAG; do
- case $FLAG in
- c ) readCron=1 ;;
- q ) quiet=1 ;;
- a ) all=1 ;;
- esac
- done
- shift $((OPTIND-1))
-
- # Remove all comments and any blank lines
- filters="/^[[:blank:]]*$/d;/^[[:blank:]]*#/d;/#.*/d;"
-
- if [ -z "${readCron}" ]; then
- # Read in the database config ...
- # - Remove any lines that do not match the expected database spec format(s)
- # - [<DatabaseType/>=]<Hostname/>/<DatabaseName/>
- # - [<DatabaseType/>=]<Hostname/>:<Port/>/<DatabaseName/>
- filters+="/^[a-zA-Z0-9=_/-]*\(:[0-9]*\)\?\/[a-zA-Z0-9_/-]*$/!d;"
- if [ -z "${all}" ]; then
- # Remove any database configs that are not for the current container type
- # Database configs that do not define the database type are assumed to be for the current container type
- filters+="/\(^[a-zA-Z0-9_/-]*\(:[0-9]*\)\?\/[a-zA-Z0-9_/-]*$\)\|\(^${CONTAINER_TYPE}=\)/!d;"
- fi
- else
- # Read in the cron config ...
- # - Remove any lines that MATCH expected database spec format(s),
- # leaving, what should be, cron tabs.
- filters+="/^[a-zA-Z0-9=_/-]*\(:[0-9]*\)\?\/[a-zA-Z0-9_/-]*$/d;"
- fi
-
- if [ -f ${BACKUP_CONF} ]; then
- if [ -z "${quiet}" ]; then
- echo "Reading backup config from ${BACKUP_CONF} ..." >&2
- fi
- _value=$(sed "${filters}" ${BACKUP_CONF})
- fi
-
- if [ -z "${_value}" ] && [ -z "${readCron}" ]; then
- # Backward compatibility
- if [ -z "${quiet}" ]; then
- echo "Reading backup config from environment variables ..." >&2
- fi
- _value="${DATABASE_SERVICE_NAME}${DEFAULT_PORT:+:${DEFAULT_PORT}}${POSTGRESQL_DATABASE:+/${POSTGRESQL_DATABASE}}"
- fi
-
- echo "${_value}"
- )
-}
-
-function getNumBackupsToRetain(){
- (
- _count=0
- _backupType=${1:-$(getBackupType)}
-
- case "${_backupType}" in
- daily)
- _count=${DAILY_BACKUPS}
- if (( ${_count} <= 0 )) && (( ${WEEKLY_BACKUPS} <= 0 )) && (( ${MONTHLY_BACKUPS} <= 0 )); then
- _count=1
- fi
- ;;
- weekly)
- _count=${WEEKLY_BACKUPS}
- ;;
- monthly)
- _count=${MONTHLY_BACKUPS}
- ;;
- *)
- _count=${NUM_BACKUPS}
- ;;
- esac
-
- echo "${_count}"
- )
-}
-
-function getUsername(){
- (
- _databaseSpec=${1}
- _hostname=$(getHostname ${_databaseSpec})
- _paramName=$(getHostUserParam ${_hostname})
- # Backward compatibility ...
- _username="${!_paramName:-${DATABASE_USER}}"
- echo ${_username}
- )
-}
-
-function getPassword(){
- (
- _databaseSpec=${1}
- _hostname=$(getHostname ${_databaseSpec})
- _paramName=$(getHostPasswordParam ${_hostname})
- # Backward compatibility ...
- _password="${!_paramName:-${DATABASE_PASSWORD}}"
- echo ${_password}
- )
-}
-
-function isLastDayOfMonth(){
- (
- _date=${1:-$(date)}
- _day=$(date -d "${_date}" +%-d)
- _month=$(date -d "${_date}" +%-m)
- _lastDayOfMonth=$(date -d "${_month}/1 + 1 month - 1 day" "+%-d")
-
- if (( ${_day} == ${_lastDayOfMonth} )); then
- return 0
- else
- return 1
- fi
- )
-}
-
-function isLastDayOfWeek(){
- (
- # We're calling Sunday the last day of the week in this case.
- _date=${1:-$(date)}
- _dayOfWeek=$(date -d "${_date}" +%u)
-
- if (( ${_dayOfWeek} == 7 )); then
- return 0
- else
- return 1
- fi
- )
-}
-
-function getBackupType(){
- (
- _backupType=""
- if rollingStrategy; then
- if isLastDayOfMonth && (( "${MONTHLY_BACKUPS}" > 0 )); then
- _backupType="monthly"
- elif isLastDayOfWeek; then
- _backupType="weekly"
- else
- _backupType="daily"
- fi
- fi
- echo "${_backupType}"
- )
-}
-
-function rollingStrategy(){
- if [[ "${BACKUP_STRATEGY}" == "rolling" ]] && (( "${WEEKLY_BACKUPS}" >= 0 )) && (( "${MONTHLY_BACKUPS}" >= 0 )); then
- return 0
- else
- return 1
- fi
-}
-
-function dailyStrategy(){
- if [[ "${BACKUP_STRATEGY}" == "daily" ]] || (( "${WEEKLY_BACKUPS}" < 0 )); then
- return 0
- else
- return 1
- fi
-}
-
-function listSettings(){
- _backupDirectory=${1:-$(createBackupFolder -g)}
- _databaseList=${2:-$(readConf -q)}
- _yellow='\e[33m'
- _nc='\e[0m' # No Color
- _notConfigured="${_yellow}not configured${_nc}"
-
- echo -e \\n"Settings:"
- _mode=$(getMode 2>/dev/null)
- echo -e "- Run mode: ${_mode}"\\n
-
- if rollingStrategy; then
- echo "- Backup strategy: rolling"
- fi
- if dailyStrategy; then
- echo "- Backup strategy: daily"
- fi
- if ! rollingStrategy && ! dailyStrategy; then
- echoYellow "- Backup strategy: Unknown backup strategy; ${BACKUP_STRATEGY}"
- _configurationError=1
- fi
- backupType=$(getBackupType)
- if [ -z "${backupType}" ]; then
- echo "- Current backup type: flat daily"
- else
- echo "- Current backup type: ${backupType}"
- fi
- echo "- Backups to retain:"
- if rollingStrategy; then
- echo " - Daily: $(getNumBackupsToRetain daily)"
- echo " - Weekly: $(getNumBackupsToRetain weekly)"
- echo " - Monthly: $(getNumBackupsToRetain monthly)"
- else
- echo " - Total: $(getNumBackupsToRetain)"
- fi
- echo "- Current backup folder: ${_backupDirectory}"
-
- if [[ "${_mode}" != ${ONCE} ]]; then
- if [[ "${_mode}" == ${CRON} ]] || [[ "${_mode}" == ${SCHEDULED} ]]; then
- _backupSchedule=$(readConf -cq)
- echo "- Time Zone: $(date +"%Z %z")"
- fi
- _backupSchedule=$(formatList "${_backupSchedule:-${BACKUP_PERIOD}}")
- echo -e \\n"- Schedule:"
- echo "${_backupSchedule}"
- fi
-
- if [[ "${CONTAINER_TYPE}" == "${UNKNOWN_DB}" ]] && [ -z "${_allowNullPlugin}" ]; then
- echoRed "\n- Container Type: ${CONTAINER_TYPE}"
- _configurationError=1
- else
- echo -e "\n- Container Type: ${CONTAINER_TYPE}"
- fi
-
- _databaseList=$(formatList "${_databaseList}")
- echo "- Databases (filtered by container type):"
- echo "${_databaseList}"
- echo
-
- if [ -z "${FTP_URL}" ]; then
- echo -e "- FTP server: ${_notConfigured}"
- else
- echo "- FTP server: ${FTP_URL}"
- fi
-
- if [ -z "${WEBHOOK_URL}" ]; then
- echo -e "- Webhook Endpoint: ${_notConfigured}"
- else
- echo "- Webhook Endpoint: ${WEBHOOK_URL}"
- fi
-
- if [ -z "${ENVIRONMENT_FRIENDLY_NAME}" ]; then
- echo -e "- Environment Friendly Name: ${_notConfigured}"
- else
- echo -e "- Environment Friendly Name: ${ENVIRONMENT_FRIENDLY_NAME}"
- fi
- if [ -z "${ENVIRONMENT_NAME}" ]; then
- echo -e "- Environment Name (Id): ${_notConfigured}"
- else
- echo "- Environment Name (Id): ${ENVIRONMENT_NAME}"
- fi
-
- if [ ! -z "${_configurationError}" ]; then
- echo
- logError "Configuration error! The script will exit."
- sleep 5
- exit 1
- fi
- echo
-}
-
-function isScheduled(){
- (
- if [ ! -z "${SCHEDULED_RUN}" ]; then
- return 0
- else
- return 1
- fi
- )
-}
-
-function isScripted(){
- (
- if [ ! -z "${SCHEDULED_RUN}" ]; then
- return 0
- else
- return 1
- fi
- )
-}
-
-function restoreMode(){
- (
- if [ ! -z "${_restoreDatabase}" ]; then
- return 0
- else
- return 1
- fi
- )
-}
-
-function verifyMode(){
- (
- if [ ! -z "${_verifyBackup}" ]; then
- return 0
- else
- return 1
- fi
- )
-}
-
-function pruneMode(){
- (
- if [ ! -z "${RUN_PRUNE}" ]; then
- return 0
- else
- return 1
- fi
- )
-}
-
-function cronMode(){
- (
- cronTabs=$(readConf -cq)
- if isInstalled "go-crond" && [ ! -z "${cronTabs}" ]; then
- return 0
- else
- return 1
- fi
- )
-}
-
-function runOnce() {
- if [ ! -z "${RUN_ONCE}" ]; then
- return 0
- else
- return 1
- fi
-}
-
-function getMode(){
- (
- unset _mode
-
- if pruneMode; then
- _mode="${PRUNE}"
- fi
-
- if [ -z "${_mode}" ] && restoreMode; then
- _mode="${RESTORE}"
- fi
-
- if [ -z "${_mode}" ] && verifyMode; then
- # Determine if this is a scheduled verification or a manual one.
- if isScheduled; then
- if cronMode; then
- _mode="${SCHEDULED_VERIFY}"
- else
- _mode="${ERROR}"
- logError "Scheduled mode cannot be used without cron being installed and at least one cron tab being defined in ${BACKUP_CONF}."
- fi
- else
- _mode="${VERIFY}"
- fi
- fi
-
- if [ -z "${_mode}" ] && runOnce; then
- _mode="${ONCE}"
- fi
-
- if [ -z "${_mode}" ] && isScheduled; then
- if cronMode; then
- _mode="${SCHEDULED}"
- else
- _mode="${ERROR}"
- logError "Scheduled mode cannot be used without cron being installed and at least one cron tab being defined in ${BACKUP_CONF}."
- fi
- fi
-
- if [ -z "${_mode}" ] && cronMode; then
- _mode="${CRON}"
- fi
-
- if [ -z "${_mode}" ]; then
- _mode="${LEGACY}"
- fi
-
- echo "${_mode}"
- )
-}
-
-function validateOperation(){
- (
- _databaseSpec=${1}
- _mode=${2}
- _rtnCd=0
-
- if [[ "${_mode}" == ${RESTORE} ]] && ! isForContainerType ${_databaseSpec}; then
- echoRed "\nYou are attempting to restore database '${_databaseSpec}' from a ${CONTAINER_TYPE} container."
- echoRed "Cannot continue with the restore. It must be initiated from the matching container type."
- _rtnCd=1
- fi
-
- return ${_rtnCd}
- )
-}
-# ======================================================================================
\ No newline at end of file
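
The two sed filters in readConf are what make a single backup.conf shareable between container types: one pass keeps only database specs, the other keeps only cron tabs. A self-contained illustration using the spec regex verbatim:

    #!/bin/bash
    # Split a mixed config into database specs and cron tabs, as readConf does.
    conf=$(printf '%s\n' 'postgres=postgresql:5432/TheOrgBook_Database' '0 1 * * * default ./backup.sh -s')
    specRegex='^[a-zA-Z0-9=_/-]*\(:[0-9]*\)\?\/[a-zA-Z0-9_/-]*$'
    echo "${conf}" | sed "/${specRegex}/!d"   # keeps only the database spec line
    echo "${conf}" | sed "/${specRegex}/d"    # keeps only the cron tab line
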
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.container.utils b/openshift-v3/templates/backup-container-2.0.0/docker/backup.container.utils
deleted file mode 100644
index 3bb4115b6..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.container.utils
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# Container Utility Functions:
-# -----------------------------------------------------------------------------------------------------------------
-function isPostgres(){
- (
- if isInstalled "psql"; then
- return 0
- else
- return 1
- fi
- )
-}
-
-function isMongo(){
- (
- if isInstalled "mongo"; then
- return 0
- else
- return 1
- fi
- )
-}
-
-function getContainerType(){
- (
- local _containerType=${UNKNOWN_DB}
- _rtnCd=0
-
- if isPostgres; then
- _containerType=${POSTGRE_DB}
- elif isMongo; then
- _containerType=${MONGO_DB}
- else
- _containerType=${UNKNOWN_DB}
- _rtnCd=1
- fi
-
- echo "${_containerType}"
- return ${_rtnCd}
- )
-}
-
-function isForContainerType(){
- (
- _databaseSpec=${1}
- _databaseType=$(getDatabaseType ${_databaseSpec})
-
- # If the database type has not been defined, assume the database spec is valid for the current database container type.
- if [ -z "${_databaseType}" ] || [[ "${_databaseType}" == "${CONTAINER_TYPE}" ]]; then
- return 0
- else
- return 1
- fi
- )
-}
-# ======================================================================================
\ No newline at end of file
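
In backup.settings (later in this patch) CONTAINER_TYPE is set from getContainerType, which in turn determines the plug-in backup.sh loads. The probe reduces to checking which client binary is installed; a standalone equivalent:

    #!/bin/bash
    # Same detection logic as isPostgres/isMongo: probe for the client binary.
    if type psql >/dev/null 2>&1; then echo "postgres"
    elif type mongo >/dev/null 2>&1; then echo "mongo"
    else echo "null"   # UNKNOWN_DB; only usable with the null plug-in
    fi
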
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.file.utils b/openshift-v3/templates/backup-container-2.0.0/docker/backup.file.utils
deleted file mode 100644
index 79dae39e6..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.file.utils
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# File Utility Functions
-# -----------------------------------------------------------------------------------------------------------------
-function makeDirectory()
-{
- (
- # Recursively creates directories with the correct permissions.
- # ${1} is the directory to be created
- # Inspired by https://unix.stackexchange.com/questions/49263/recursive-mkdir
- directory="${1}"
- test $# -eq 1 || { echo "Function 'makeDirectory' can create only one directory (with its parent directories)."; exit 1; }
- test -d "${directory}" && return 0
- test -d "$(dirname "${directory}")" || { makeDirectory "$(dirname "${directory}")" || return 1; }
- test -d "${directory}" || { mkdir --mode=g+w "${directory}" || return 1; }
- return 0
- )
-}
-
-function finalizeBackup(){
- (
- _filename=${1}
- _inProgressFilename="${_filename}${IN_PROGRESS_BACKUP_FILE_EXTENSION}"
- _finalFilename="${_filename}${BACKUP_FILE_EXTENSION}"
-
- if [ -f ${_inProgressFilename} ]; then
- mv "${_inProgressFilename}" "${_finalFilename}"
- echo "${_finalFilename}"
- fi
- )
-}
-
-function listExistingBackups(){
- (
- local _backupDir=${1:-${ROOT_BACKUP_DIR}}
- local database
- local databases=$(readConf -q)
- local output="\nDatabase,Current Size"
-
- for database in ${databases}; do
- if isForContainerType ${database}; then
- output+="\n${database},$(getDbSize "${database}")"
- fi
- done
-
- echoMagenta "\n================================================================================================================================"
- echoMagenta "Current Backups:"
- echoMagenta "\n$(echo -ne "${output}" | column -t -s ,)"
- echoMagenta "\n$(df -h ${_backupDir})"
- echoMagenta "--------------------------------------------------------------------------------------------------------------------------------"
- du -ah --time ${_backupDir}
- echoMagenta "================================================================================================================================\n"
- )
-}
-
-function getDirectoryName(){
- (
- local path=${1}
- path="${path%"${path##*[!/]}"}"
- local name="${path##*/}"
- echo "${name}"
- )
-}
-
-function getBackupTypeFromPath(){
- (
- local path=${1}
- path="${path%"${path##*[!/]}"}"
- path="$(dirname "${path}")"
- local backupType=$(getDirectoryName "${path}")
- echo "${backupType}"
- )
-}
-
-function prune(){
- (
- local database
- local backupDirs
- local backupDir
- local backupType
- local backupTypes
- local pruneBackup
- unset backupTypes
- unset backupDirs
- unset pruneBackup
-
- local databases=$(readConf -q)
- if rollingStrategy; then
- backupTypes="daily weekly monthly"
- for backupType in ${backupTypes}; do
- backupDirs="${backupDirs} $(createBackupFolder -g ${backupType})"
- done
- else
- backupDirs=$(createBackupFolder -g)
- fi
-
- if [ ! -z "${_fromBackup}" ]; then
- pruneBackup="$(findBackup "" "${_fromBackup}")"
- while [ ! -z "${pruneBackup}" ]; do
- echoYellow "\nAbout to delete backup file: ${pruneBackup}"
- waitForAnyKey
- rm -rfvd "${pruneBackup}"
-
- # Quietly delete any empty directories that are left behind ...
- find ${ROOT_BACKUP_DIR} -type d -empty -delete > /dev/null 2>&1
- pruneBackup="$(findBackup "" "${_fromBackup}")"
- done
- else
- for backupDir in ${backupDirs}; do
- for database in ${databases}; do
- unset backupType
- if rollingStrategy; then
- backupType=$(getBackupTypeFromPath "${backupDir}")
- fi
- pruneBackups "${backupDir}" "${database}" "${backupType}"
- done
- done
- fi
- )
-}
-
-function pruneBackups(){
- (
- _backupDir=${1}
- _databaseSpec=${2}
- _backupType=${3:-''}
- _pruneDir="$(dirname "${_backupDir}")"
- _numBackupsToRetain=$(getNumBackupsToRetain "${_backupType}")
- _coreFilename=$(generateCoreFilename ${_databaseSpec})
-
- if [ -d ${_pruneDir} ]; then
- let _index=${_numBackupsToRetain}+1
- _filesToPrune=$(find ${_pruneDir}* -type f -printf '%T@ %p\n' | grep ${_coreFilename} | sort -r | tail -n +${_index} | sed 's~^.* \(.*$\)~\1~')
-
- if [ ! -z "${_filesToPrune}" ]; then
- echoYellow "\nPruning ${_coreFilename} backups from ${_pruneDir} ..."
- echo "${_filesToPrune}" | xargs rm -rfvd
-
- # Quietly delete any empty directories that are left behind ...
- find ${ROOT_BACKUP_DIR} -type d -empty -delete > /dev/null 2>&1
- fi
- fi
- )
-}
-
-function touchBackupFile() {
- (
- # For safety, make absolutely certain the directory and file exist.
- # The pruning process removes empty directories, so if there is an error
- # during a backup the backup directory could be deleted.
- _backupFile=${1}
- _backupDir="${_backupFile%/*}"
- makeDirectory ${_backupDir} && touch ${_backupFile}
- )
-}
-
-function findBackup(){
- (
- _databaseSpec=${1}
- _fileName=${2}
-
- # If no backup file was specified, find the most recent for the database.
- # Otherwise treat the value provided as a filter to find the most recent backup file matching the filter.
- if [ -z "${_fileName}" ]; then
- _coreFilename=$(generateCoreFilename ${_databaseSpec})
- _fileName=$(find ${ROOT_BACKUP_DIR}* -type f -printf '%T@ %p\n' | grep ${_coreFilename} | sort | tail -n 1 | sed 's~^.* \(.*$\)~\1~')
- else
- _fileName=$(find ${ROOT_BACKUP_DIR}* -type f -printf '%T@ %p\n' | grep ${_fileName} | sort | tail -n 1 | sed 's~^.* \(.*$\)~\1~')
- fi
-
- echo "${_fileName}"
- )
-}
-
-function createBackupFolder(){
- (
- local OPTIND
- local genOnly
- unset genOnly
- while getopts g FLAG; do
- case $FLAG in
- g ) genOnly=1 ;;
- esac
- done
- shift $((OPTIND-1))
-
- _backupTypeDir="${1:-$(getBackupType)}"
- if [ ! -z "${_backupTypeDir}" ]; then
- _backupTypeDir=${_backupTypeDir}/
- fi
-
- _backupDir="${ROOT_BACKUP_DIR}${_backupTypeDir}`date +\%Y-\%m-\%d`/"
-
- # Don't actually create the folder if we're just generating it for printing the configuration.
- if [ -z "${genOnly}" ]; then
- echo "Making backup directory ${_backupDir} ..." >&2
- if ! makeDirectory ${_backupDir}; then
- logError "Failed to create backup directory ${_backupDir}."
- exit 1;
- fi;
- fi
-
- echo ${_backupDir}
- )
-}
-
-function generateFilename(){
- (
- _backupDir=${1}
- _databaseSpec=${2}
- _coreFilename=$(generateCoreFilename ${_databaseSpec})
- _filename="${_backupDir}${_coreFilename}_`date +\%Y-\%m-\%d_%H-%M-%S`"
- echo ${_filename}
- )
-}
-
-function generateCoreFilename(){
- (
- _databaseSpec=${1}
- _hostname=$(getHostname ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _coreFilename="${_hostname}-${_database}"
- echo ${_coreFilename}
- )
-}
-
-function getFileSize(){
- (
- _filename=${1}
- echo $(du -h "${_filename}" | awk '{print $1}')
- )
-}
-# =================================================================================================================
\ No newline at end of file
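
Taken together, createBackupFolder and generateFilename give the rolling strategy a predictable on-disk layout; an illustrative listing for the zeva database (the dates are made up):

    /backups/daily/2021-01-07/patroni-master-prod-zeva_2021-01-07_21-00-00.sql.gz
    /backups/weekly/2021-01-03/patroni-master-prod-zeva_2021-01-03_21-00-00.sql.gz
    /backups/monthly/2020-12-31/patroni-master-prod-zeva_2020-12-31_21-00-00.sql.gz
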
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.ftp b/openshift-v3/templates/backup-container-2.0.0/docker/backup.ftp
deleted file mode 100644
index d0a935cf9..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.ftp
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# FTP Support Functions:
-# -----------------------------------------------------------------------------------------------------------------
-function ftpBackup(){
- (
- if [ -z "${FTP_URL}" ] ; then
- return 0
- fi
-
- _filename=${1}
- _filenameWithExtension="${_filename}${BACKUP_FILE_EXTENSION}"
- echo "Transferring ${_filenameWithExtension} to ${FTP_URL}"
- curl --ftp-ssl -T ${_filenameWithExtension} --user ${FTP_USER}:${FTP_PASSWORD} ${FTP_URL}
-
- if [ ${?} -eq 0 ]; then
- logInfo "Successfully transferred ${_filenameWithExtension} to the FTP server"
- else
- logError "Failed to transfer ${_filenameWithExtension} with the exit code ${?}"
- fi
- )
-}
-# =================================================================================================================
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.logging b/openshift-v3/templates/backup-container-2.0.0/docker/backup.logging
deleted file mode 100644
index 50449f0ae..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.logging
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# Logging Functions:
-# -----------------------------------------------------------------------------------------------------------------
-function debugMsg (){
- _msg="${@}"
- if [ "${BACKUP_LOG_LEVEL}" == "debug" ]; then
- echoGreen "$(date) - [DEBUG] - ${@}" >&2
- fi
-}
-
-function echoRed (){
- _msg="${@}"
- _red='\e[31m'
- _nc='\e[0m' # No Color
- echo -e "${_red}${_msg}${_nc}"
-}
-
-function echoYellow (){
- _msg="${@}"
- _yellow='\e[33m'
- _nc='\e[0m' # No Color
- echo -e "${_yellow}${_msg}${_nc}"
-}
-
-function echoBlue (){
- _msg="${@}"
- _blue='\e[34m'
- _nc='\e[0m' # No Color
- echo -e "${_blue}${_msg}${_nc}"
-}
-
-function echoGreen (){
- _msg="${@}"
- _green='\e[32m'
- _nc='\e[0m' # No Color
- echo -e "${_green}${_msg}${_nc}"
-}
-
-function echoMagenta (){
- _msg="${@}"
- _magenta='\e[35m'
- _nc='\e[0m' # No Color
- echo -e "${_magenta}${_msg}${_nc}"
-}
-
-function logInfo(){
- (
- infoMsg="${1}"
- echo -e "${infoMsg}"
- postMsgToWebhook "${ENVIRONMENT_FRIENDLY_NAME}" \
- "${ENVIRONMENT_NAME}" \
- "INFO" \
- "${infoMsg}"
- )
-}
-
-function logWarn(){
- (
- warnMsg="${1}"
- echoYellow "${warnMsg}"
- postMsgToWebhook "${ENVIRONMENT_FRIENDLY_NAME}" \
- "${ENVIRONMENT_NAME}" \
- "WARN" \
- "${warnMsg}"
- )
-}
-
-function logError(){
- (
- errorMsg="${1}"
- echoRed "[!!ERROR!!] - ${errorMsg}" >&2
- postMsgToWebhook "${ENVIRONMENT_FRIENDLY_NAME}" \
- "${ENVIRONMENT_NAME}" \
- "ERROR" \
- "${errorMsg}"
- )
-}
-
-function getWebhookPayload(){
- _payload=$(eval "cat <<-EOF
-$(<${WEBHOOK_TEMPLATE})
-EOF
-")
- echo "${_payload}"
-}
-
-function formatWebhookMsg(){
- (
- # Escape all double quotes
- # Escape all newlines
- filters='s~"~\\"~g;:a;N;$!ba;s~\n~\\n~g;'
- _value=$(echo "${1}" | sed "${filters}")
- echo "${_value}"
- )
-}
-
-function postMsgToWebhook(){
- (
- if [ -z "${WEBHOOK_URL}" ] && [ -f ${WEBHOOK_TEMPLATE} ]; then
- return 0
- fi
-
- projectFriendlyName=${1}
- projectName=${2}
- statusCode=${3}
- message=$(formatWebhookMsg "${4}")
- curl -s -X POST -H 'Content-Type: application/json' --data "$(getWebhookPayload)" "${WEBHOOK_URL}" > /dev/null
- )
-}
-# =================================================================================================================
\ No newline at end of file
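
getWebhookPayload relies on evaluating the template text a second time so that ${...} references inside it expand against the current shell variables. A self-contained sketch, equivalent in effect (the real webhook-template.json is copied into the image by the Dockerfiles above but is not part of this patch, so a stand-in template is created here):

    #!/bin/bash
    # Expand ${...} references in a JSON template, as getWebhookPayload does.
    WEBHOOK_TEMPLATE=$(mktemp)
    printf '%s' '{"project": "${projectName}", "status": "${statusCode}"}' > "${WEBHOOK_TEMPLATE}"
    projectName="zeva"; statusCode="INFO"
    # Escape the template's double quotes, then let eval expand the variables.
    eval "printf '%s\n' \"$(sed 's~"~\\"~g' "${WEBHOOK_TEMPLATE}")\""
    rm -f "${WEBHOOK_TEMPLATE}"
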
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.misc.utils b/openshift-v3/templates/backup-container-2.0.0/docker/backup.misc.utils
deleted file mode 100644
index cab2ac3e3..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.misc.utils
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# General Utility Functions:
-# -----------------------------------------------------------------------------------------------------------------
-function waitForAnyKey() {
- read -n1 -s -r -p $'\e[33mWould you like to continue?\e[0m Press Ctrl-C to exit, or any other key to continue ...' key
- echo -e \\n
-
- # If we get here the user did NOT press Ctrl-C ...
- return 0
-}
-
-function formatList(){
- (
- filters='s~^~ - ~;'
- _value=$(echo "${1}" | sed "${filters}")
- echo "${_value}"
- )
-}
-
-function isInstalled(){
- rtnVal=$(type "$1" >/dev/null 2>&1)
- rtnCd=$?
- if [ ${rtnCd} -ne 0 ]; then
- return 1
- else
- return 0
- fi
-}
-# ======================================================================================
\ No newline at end of file
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.mongo.plugin b/openshift-v3/templates/backup-container-2.0.0/docker/backup.mongo.plugin
deleted file mode 100644
index 0f5583c60..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.mongo.plugin
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# Mongo Backup and Restore Functions:
-# - Dynamically loaded as a plug-in
-# -----------------------------------------------------------------------------------------------------------------
-function onBackupDatabase(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
- _backupFile=${2}
-
- _hostname=$(getHostname ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${_databaseSpec})
- _portArg=${_port:+"--port=${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
- echoGreen "Backing up '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' to '${_backupFile}' ..."
-
- _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"}
- mongodump -h "${_hostname}" -d "${_database}" ${_authDbArg} ${_portArg} -u "${_username}" -p "${_password}" --quiet --gzip --archive=${_backupFile}
- return ${?}
- )
-}
-
-function onRestoreDatabase(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
- _fileName=${2}
- _adminPassword=${3}
-
- _hostname=$(getHostname ${flags} ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${flags} ${_databaseSpec})
- _portArg=${_port:+"--port=${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
- echo -e "Restoring '${_fileName}' to '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' ...\n" >&2
-
- # ToDo:
- # - Add support for restoring to a different database.
- # The following implementation only supports restoring to a database of the same name,
- # unlike the postgres implementation that allows the database to be restored to a database of a different
- # name for testing.
- # Ref: https://stackoverflow.com/questions/36321899/mongorestore-to-a-different-database
-
- _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"}
- mongorestore --drop -h ${_hostname} -d "${_database}" ${_authDbArg} ${_portArg} -u "${_username}" -p "${_password}" --gzip --archive=${_fileName} --nsInclude="*"
- return ${?}
- )
-}
-
-function onStartServer(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- # Start a local MongoDb instance
- MONGODB_DATABASE=$(getDatabaseName "${_databaseSpec}") \
- MONGODB_USER=$(getUsername "${_databaseSpec}") \
- MONGODB_PASSWORD=$(getPassword "${_databaseSpec}") \
- MONGODB_ADMIN_PASSWORD=$(getPassword "${_databaseSpec}") \
- run-mongod >/dev/null 2>&1 &
- )
-}
-
-function onStopServer(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- _port=$(getPort ${_databaseSpec})
- _portArg=${_port:+"--port ${_port}"}
- _username=admin
- _password=$(getPassword ${_databaseSpec})
-
- _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"}
- mongo admin ${_authDbArg} ${_portArg} -u "${_username}" -p "${_password}" --quiet --eval "db.shutdownServer()"
-
- # Delete the database files and configuration
- echo -e "Cleaning up ...\n" >&2
- rm -rf /var/lib/mongodb/data/*
- )
-}
-
-function onPingDbServer(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- _hostname=$(getHostname ${flags} ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${flags} ${_databaseSpec})
- _portArg=${_port:+"--port ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
-
- _dbAddressArg=${_hostname}${_port:+:${_port}}${_database:+/${_database}}
- _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"}
- if mongo ${_dbAddressArg} ${_authDbArg} -u "${_username}" -p "${_password}" --quiet --eval='quit()' >/dev/null 2>&1; then
- return 0
- else
- return 1
- fi
- )
-}
-
-function onVerifyBackup(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- _hostname=$(getHostname -l ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort -l ${_databaseSpec})
- _portArg=${_port:+"--port ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
-
- _dbAddressArg=${_hostname}${_port:+:${_port}}${_database:+/${_database}}
- _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"}
- collections=$(mongo ${_dbAddressArg} ${_authDbArg} -u "${_username}" -p "${_password}" --quiet --eval 'var dbs = [];dbs = db.getCollectionNames();for (i in dbs){ print(db.dbs[i]);}';)
- rtnCd=${?}
-
- # Get the size of the restored database
- if (( ${rtnCd} == 0 )); then
- size=$(getDbSize -l "${_databaseSpec}")
- rtnCd=${?}
- fi
-
- if (( ${rtnCd} == 0 )); then
- numResults=$(echo "${collections}"| wc -l)
- if [[ ! -z "${collections}" ]] && (( numResults >= 1 )); then
- # All good
- verificationLog="\nThe restored database contained ${numResults} collections, and is ${size} in size."
- else
- # Not so good
- verificationLog="\nNo collections were found in the restored database ${_database}."
- rtnCd="3"
- fi
- fi
-
- echo ${verificationLog}
- return ${rtnCd}
- )
-}
-
-function onGetDbSize(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- _hostname=$(getHostname ${flags} ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${flags} ${_databaseSpec})
- _portArg=${_port:+"--port ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
-
- _dbAddressArg=${_hostname}${_port:+:${_port}}${_database:+/${_database}}
- _authDbArg=${MONGODB_AUTHENTICATION_DATABASE:+"--authenticationDatabase ${MONGODB_AUTHENTICATION_DATABASE}"}
- size=$(mongo ${_dbAddressArg} ${_authDbArg} -u "${_username}" -p "${_password}" --quiet --eval 'printjson(db.stats().fsTotalSize)')
- rtnCd=${?}
-
- echo ${size}
- return ${rtnCd}
- )
-}
-# =================================================================================================================
\ No newline at end of file
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.null.plugin b/openshift-v3/templates/backup-container-2.0.0/docker/backup.null.plugin
deleted file mode 100644
index 14ceed050..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.null.plugin
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# Null Backup and Restore Functions:
-# - Dynamically loaded as a plug-in
-# - Refer to existing plug-ins for implementation examples.
-# -----------------------------------------------------------------------------------------------------------------
-function onBackupDatabase(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
- _backupFile=${2}
-
- _hostname=$(getHostname ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${_databaseSpec})
- _portArg=${_port:+"--port ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
- echoGreen "Backing up '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' to '${_backupFile}' ..."
-
- echoRed "[backup.null.plugin] onBackupDatabase - Not Implemented"
- # echoGreen "Starting database backup ..."
- # Add your database specific backup operation(s) here.
- return ${?}
- )
-}
-
-function onRestoreDatabase(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
- _fileName=${2}
- _adminPassword=${3}
-
- _hostname=$(getHostname ${flags} ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${flags} ${_databaseSpec})
- _portArg=${_port:+"--port ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
- echo -e "Restoring '${_fileName}' to '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' ...\n" >&2
-
- echoRed "[backup.null.plugin] onRestoreDatabase - Not Implemented"
- # Add your database specific restore operation(s) here.
- return ${?}
- )
-}
-
-function onStartServer(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- echoRed "[backup.null.plugin] onStartServer - Not Implemented"
- # Add your NON-BLOCKING database specific startup operation(s) here.
- # - Start the database server as a background job.
- )
-}
-
-function onStopServer(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
-
- echoRed "[backup.null.plugin] onStopServer - Not Implemented"
-
- # echo "Shutting down..."
- # Add your database specific shutdown operation(s) here.
-
- # Delete the database files and configuration
- # echo -e "Cleaning up ...\n" >&2
- # Add your database specific cleanup operation(s) here.
- )
-}
-
-function onPingDbServer(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- _hostname=$(getHostname ${flags} ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${flags} ${_databaseSpec})
- _portArg=${_port:+"--port ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
-
- echoRed "[backup.null.plugin] onPingDbServer - Not Implemented"
- # Add your database specific ping operation(s) here.
- # if ; then
- # return 0
- # else
- # return 1
- # fi
- )
-}
-
-function onVerifyBackup(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- _hostname=$(getHostname -l ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort -l ${_databaseSpec})
- _portArg=${_port:+"--port ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
-
- echoRed "[backup.null.plugin] onVerifyBackup - Not Implemented"
- # Add your database specific verification operation(s) here.
-
- # echo ${verificationLog}
- # return ${rtnCd}
- )
-}
-
-function onGetDbSize(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- _hostname=$(getHostname ${flags} ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${flags} ${_databaseSpec})
- _portArg=${_port:+"--port ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
-
- echoRed "[backup.null.plugin] onGetDbSize - Not Implemented"
- # Add your database specific get size operation(s) here.
-
- # echo ${size}
- # return ${rtnCd}
- )
-}
-# =================================================================================================================
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.postgres.plugin b/openshift-v3/templates/backup-container-2.0.0/docker/backup.postgres.plugin
deleted file mode 100644
index e5248ac17..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.postgres.plugin
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# Postgres Backup and Restore Functions:
-# - Dynamically loaded as a plug-in
-# -----------------------------------------------------------------------------------------------------------------
-function onBackupDatabase(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
- _backupFile=${2}
-
- _hostname=$(getHostname ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${_databaseSpec})
- _portArg=${_port:+"-p ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
- echoGreen "Backing up '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' to '${_backupFile}' ..."
-
- PGPASSWORD=${_password} pg_dump -Fp -h "${_hostname}" ${_portArg} -U "${_username}" "${_database}" | gzip > ${_backupFile}
- return ${PIPESTATUS[0]}
- )
-}
-
-function onRestoreDatabase(){
- (
- local OPTIND
- local unset quiet
- local unset flags
- while getopts :q FLAG; do
- case $FLAG in
- q )
- quiet=1
- flags+="-${FLAG} "
- ;;
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
- _fileName=${2}
- _adminPassword=${3}
-
- _hostname=$(getHostname ${flags} ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${flags} ${_databaseSpec})
- _portArg=${_port:+"-p ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
- echo -e "Restoring '${_fileName}' to '${_hostname}${_port:+:${_port}}${_database:+/${_database}}' ...\n" >&2
-
- export PGPASSWORD=${_adminPassword}
- _rtnCd=0
-
- # Drop
- if (( ${_rtnCd} == 0 )); then
- psql -h "${_hostname}" ${_portArg} -ac "DROP DATABASE \"${_database}\";"
- _rtnCd=${?}
- echo
- fi
-
- # Create
- if (( ${_rtnCd} == 0 )); then
- psql -h "${_hostname}" ${_portArg} -ac "CREATE DATABASE \"${_database}\";"
- _rtnCd=${?}
- echo
- fi
-
- # Grant User Access
- if (( ${_rtnCd} == 0 )); then
- psql -h "${_hostname}" ${_portArg} -ac "GRANT ALL ON DATABASE \"${_database}\" TO \"${_username}\";"
- _rtnCd=${?}
- echo
- fi
-
- # Restore
- if (( ${_rtnCd} == 0 )); then
- gunzip -c "${_fileName}" | psql -v ON_ERROR_STOP=1 -x -h "${_hostname}" ${_portArg} -d "${_database}"
- # Get the status code from psql specifically. ${?} would only provide the status of the last command, psql in this case.
- _rtnCd=${PIPESTATUS[1]}
- fi
-
- # List tables
- if [ -z "${quiet}" ] && (( ${_rtnCd} == 0 )); then
- psql -h "${_hostname}" ${_portArg} -d "${_database}" -c "\d"
- _rtnCd=${?}
- fi
-
- return ${_rtnCd}
- )
-}
-
-function onStartServer(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- # Start a local PostgreSql instance
- POSTGRESQL_DATABASE=$(getDatabaseName "${_databaseSpec}") \
- POSTGRESQL_USER=$(getUsername "${_databaseSpec}") \
- POSTGRESQL_PASSWORD=$(getPassword "${_databaseSpec}") \
- run-postgresql >/dev/null 2>&1 &
- )
-}
-
-function onStopServer(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- # Stop the local PostgreSql instance
- pg_ctl stop -D /var/lib/pgsql/data/userdata
-
- # Delete the database files and configuration
- echo -e "Cleaning up ...\n"
- rm -rf /var/lib/pgsql/data/userdata
- )
-}
-
-function onPingDbServer(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- _hostname=$(getHostname ${flags} ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${flags} ${_databaseSpec})
- _portArg=${_port:+"-p ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
-
- if PGPASSWORD=${_password} psql -h ${_hostname} ${_portArg} -U ${_username} -q -d ${_database} -c 'SELECT 1' >/dev/null 2>&1; then
- return 0
- else
- return 1
- fi
- )
-}
-
-function onVerifyBackup(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- _hostname=$(getHostname -l ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort -l ${_databaseSpec})
- _portArg=${_port:+"-p ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
-
- debugMsg "backup.postgres.plugin - onVerifyBackup"
- tables=$(psql -h "${_hostname}" ${_portArg} -d "${_database}" -t -c "SELECT table_name FROM information_schema.tables WHERE table_schema='${TABLE_SCHEMA}' AND table_type='BASE TABLE';")
- rtnCd=${?}
-
- # Get the size of the restored database
- if (( ${rtnCd} == 0 )); then
- size=$(getDbSize -l "${_databaseSpec}")
- rtnCd=${?}
- fi
-
- if (( ${rtnCd} == 0 )); then
- numResults=$(echo "${tables}"| wc -l)
- if [[ ! -z "${tables}" ]] && (( numResults >= 1 )); then
- # All good
- verificationLog="\nThe restored database contained ${numResults} tables, and is ${size} in size."
- else
- # Not so good
- verificationLog="\nNo tables were found in the restored database."
- rtnCd="3"
- fi
- fi
-
- echo ${verificationLog}
- return ${rtnCd}
- )
-}
-
-function onGetDbSize(){
- (
- local OPTIND
- local unset flags
- while getopts : FLAG; do
- case $FLAG in
- ? ) flags+="-${OPTARG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
-
- _hostname=$(getHostname ${flags} ${_databaseSpec})
- _database=$(getDatabaseName ${_databaseSpec})
- _port=$(getPort ${flags} ${_databaseSpec})
- _portArg=${_port:+"-p ${_port}"}
- _username=$(getUsername ${_databaseSpec})
- _password=$(getPassword ${_databaseSpec})
-
- size=$(PGPASSWORD=${_password} psql -h "${_hostname}" ${_portArg} -U "${_username}" -d "${_database}" -t -c "SELECT pg_size_pretty(pg_database_size(current_database())) as size;")
- rtnCd=${?}
-
- echo ${size}
- return ${rtnCd}
- )
-}
-# =================================================================================================================
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.server.utils b/openshift-v3/templates/backup-container-2.0.0/docker/backup.server.utils
deleted file mode 100644
index 9e938a150..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.server.utils
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# Backup Server Utility Functions:
-# -----------------------------------------------------------------------------------------------------------------
-function startCron(){
- logInfo "Starting backup server in cron mode ..."
- listSettings
- echoBlue "Starting go-crond as a background task ...\n"
- CRON_CMD="go-crond -v --default-user=${UID} --allow-unprivileged ${BACKUP_CONF}"
- exec ${CRON_CMD} &
- wait
-}
-
-function startLegacy(){
- (
- while true; do
- runBackups
-
- echoYellow "Sleeping for ${BACKUP_PERIOD} ...\n"
- sleep ${BACKUP_PERIOD}
- done
- )
-}
-
-function shutDown(){
- jobIds=$(jobs | awk -F '[][]' '{print $2}' )
- for jobId in ${jobIds} ; do
- echo "Shutting down background job '${jobId}' ..."
- kill %${jobId}
- done
-
- if [ ! -z "${jobIds}" ]; then
- echo "Waiting for any background jobs to complete ..."
- fi
- wait
-
- exit 0
-}
-# ======================================================================================
\ No newline at end of file
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.settings b/openshift-v3/templates/backup-container-2.0.0/docker/backup.settings
deleted file mode 100644
index 7de738c8a..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.settings
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/bash
-# ======================================================================================
-# Default Settings
-# --------------------------------------------------------------------------------------
-export BACKUP_FILE_EXTENSION=".sql.gz"
-export IN_PROGRESS_BACKUP_FILE_EXTENSION=".sql.gz.in_progress"
-export DEFAULT_PORT=${POSTGRESQL_PORT_NUM:-5432}
-export DATABASE_SERVICE_NAME=${DATABASE_SERVICE_NAME:-postgresql}
-export POSTGRESQL_DATABASE=${POSTGRESQL_DATABASE:-my_postgres_db}
-export TABLE_SCHEMA=${TABLE_SCHEMA:-public}
-
-# Supports:
-# - daily
-# - rolling
-export BACKUP_STRATEGY=$(echo "${BACKUP_STRATEGY:-rolling}" | tr '[:upper:]' '[:lower:]')
-export BACKUP_PERIOD=${BACKUP_PERIOD:-1d}
-export ROOT_BACKUP_DIR=${ROOT_BACKUP_DIR:-${BACKUP_DIR:-/backups/}}
-export BACKUP_CONF=${BACKUP_CONF:-backup.conf}
-
-# Used to prune the total number of backup when using the daily backup strategy.
-# Default provides for one full month of backups
-export NUM_BACKUPS=${NUM_BACKUPS:-31}
-
-# Used to prune the total number of backup when using the rolling backup strategy.
-# Defaults provide for:
-# - A week's worth of daily backups
-# - A month's worth of weekly backups
-# - The previous month's backup
-export DAILY_BACKUPS=${DAILY_BACKUPS:-6}
-export WEEKLY_BACKUPS=${WEEKLY_BACKUPS:-4}
-export MONTHLY_BACKUPS=${MONTHLY_BACKUPS:-1}
-
-# Webhook defaults
-WEBHOOK_TEMPLATE=${WEBHOOK_TEMPLATE:-webhook-template.json}
-
-# Modes:
-export ONCE="once"
-export SCHEDULED="scheduled"
-export RESTORE="restore"
-export VERIFY="verify"
-export CRON="cron"
-export LEGACY="legacy"
-export ERROR="error"
-export SCHEDULED_VERIFY="scheduled-verify"
-export PRUNE="prune"
-
-# Supported Database Containers
-export UNKNOWN_DB="null"
-export MONGO_DB="mongo"
-export POSTGRE_DB="postgres"
-export CONTAINER_TYPE="$(getContainerType)"
-
-# Other:
-export DATABASE_SERVER_TIMEOUT=${DATABASE_SERVER_TIMEOUT:-120}
-# ======================================================================================
\ No newline at end of file
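
A worked example of the retention defaults above: under the rolling strategy a single database accumulates at most

    DAILY_BACKUPS + WEEKLY_BACKUPS + MONTHLY_BACKUPS = 6 + 4 + 1 = 11 backup files

while the daily strategy keeps a flat NUM_BACKUPS = 31, i.e. one month's worth at the default one-day BACKUP_PERIOD.
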
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.sh b/openshift-v3/templates/backup-container-2.0.0/docker/backup.sh
deleted file mode 100755
index f1e17d87a..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.sh
+++ /dev/null
@@ -1,140 +0,0 @@
-#!/bin/bash
-
-# ======================================================================================
-# Imports
-# --------------------------------------------------------------------------------------
-. ./backup.usage # Usage information
-. ./backup.logging # Logging functions
-. ./backup.config.utils # Configuration functions
-. ./backup.container.utils # Container Utility Functions
-. ./backup.ftp # FTP Support functions
-. ./backup.misc.utils # General Utility Functions
-. ./backup.file.utils # File Utility Functions
-. ./backup.utils # Primary Database Backup and Restore Functions
-. ./backup.server.utils # Backup Server Utility Functions
-. ./backup.settings # Default Settings
-# ======================================================================================
-
-# ======================================================================================
-# Initialization:
-# --------------------------------------------------------------------------------------
-trap shutDown EXIT TERM
-
-# Load database plug-in based on the container type ...
-. ./backup.${CONTAINER_TYPE}.plugin > /dev/null 2>&1
-if [[ ${?} != 0 ]]; then
- echoRed "backup.${CONTAINER_TYPE}.plugin not found."
-
- # Default to null plugin.
- export CONTAINER_TYPE=${UNKNOWN_DB}
- . ./backup.${CONTAINER_TYPE}.plugin > /dev/null 2>&1
-fi
-
-while getopts nclr:v:f:1spha: FLAG; do
- case $FLAG in
- n)
- # Allow null database plugin ...
- # Without this flag loading the null plugin is considered a configuration error.
- # The null plugin can be used for testing.
- export _allowNullPlugin=1
- ;;
- c)
- echoBlue "\nListing configuration settings ..."
- listSettings
- exit 0
- ;;
- l)
- listExistingBackups ${ROOT_BACKUP_DIR}
- exit 0
- ;;
- r)
- # Trigger restore mode ...
- export _restoreDatabase=${OPTARG}
- ;;
- v)
- # Trigger verify mode ...
- export _verifyBackup=${OPTARG}
- ;;
- f)
- # Optionally specify the backup file to verify or restore from ...
- export _fromBackup=${OPTARG}
- ;;
- 1)
- export RUN_ONCE=1
- ;;
- s)
- export SCHEDULED_RUN=1
- ;;
- p)
- export RUN_PRUNE=1
- ;;
- a)
- export _adminPassword=${OPTARG}
- ;;
- h)
- usage
- ;;
- \?)
- echo -e \\n"Invalid option: -${OPTARG}"\\n
- usage
- ;;
- esac
-done
-shift $((OPTIND-1))
-# ======================================================================================
-
-# ======================================================================================
-# Main Script
-# --------------------------------------------------------------------------------------
-case $(getMode) in
- ${ONCE})
- runBackups
- echoGreen "Single backup run complete.\n"
- ;;
-
- ${SCHEDULED})
- runBackups
- echoGreen "Scheduled backup run complete.\n"
- ;;
-
- ${RESTORE})
- unset restoreFlags
- if isScripted; then
- restoreFlags="-q"
- fi
-
- if validateOperation "${_restoreDatabase}" "${RESTORE}"; then
- restoreDatabase ${restoreFlags} "${_restoreDatabase}" "${_fromBackup}"
- fi
- ;;
-
- ${VERIFY})
- verifyBackups "${_verifyBackup}" "${_fromBackup}"
- ;;
-
- ${SCHEDULED_VERIFY})
- verifyBackups -q "${_verifyBackup}" "${_fromBackup}"
- ;;
-
- ${CRON})
- startCron
- ;;
-
- ${LEGACY})
- startLegacy
- ;;
-
- ${PRUNE})
- prune
- ;;
-
- ${ERROR})
- echoRed "A configuration error has occurred, review the details above."
- usage
- ;;
- *)
- echoYellow "Unrecognized operational mode; ${_mode}"
- usage
- ;;
-esac
-# ======================================================================================
\ No newline at end of file
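
Typical invocations, tying the modes above to their flags (all are documented in backup.usage, which follows):

    ./backup.sh -1          # run one set of backups, then exit
    ./backup.sh -s          # scheduled run, as triggered from a cron tab
    ./backup.sh -l          # list existing backups
    ./backup.sh -c          # print the effective configuration and exit
    ./backup.sh -v all      # verify the most recent backup of every database
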
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.usage b/openshift-v3/templates/backup-container-2.0.0/docker/backup.usage
deleted file mode 100644
index 32238fdff..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.usage
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# Usage:
-# -----------------------------------------------------------------------------------------------------------------
-function usage () {
- cat <<-EOF
-
- Automated backup script for PostgreSQL and MongoDB databases.
-
- There are two modes of scheduling backups:
- - Cron Mode:
- - Allows one or more schedules to be defined as cron tabs in ${BACKUP_CONF}.
- - If cron (go-crond) is installed (which is handled by the Dockerfile) and at least one cron tab is defined, the script will start up in Cron Mode,
- otherwise it will default to Legacy Mode.
- - Refer to ${BACKUP_CONF} for additional details and examples of using cron scheduling.
-
- - Legacy Mode:
- - Uses a simple sleep command to set the schedule based on the setting of BACKUP_PERIOD; defaults to ${BACKUP_PERIOD}
-
- Refer to the project documentation for additional details on how to use this script.
- - https://github.com/BCDevOps/backup-container
-
- Usage:
- $0 [options]
-
- Standard Options:
- =================
- -h prints this usage documentation.
-
- -1 run once.
- Performs a single set of backups and exits.
-
- -s run in scheduled/silent (no questions asked) mode.
- A flag to be used by cron scheduled backups to indicate they are being run on a schedule.
- Requires cron (go-crond) to be installed and at least one cron tab to be defined in ${BACKUP_CONF}
- Refer to ${BACKUP_CONF} for additional details and examples of using cron scheduling.
-
- -l lists existing backups.
- Great for listing the available backups for a restore.
-
- -c lists the current configuration settings and exits.
- Great for confirming the current settings, and listing the databases included in the backup schedule.
-
- -p prune backups
- Used to manually prune backups.
- This can be used with the '-f' option, see below, to prune specific backups or sets of backups.
- Use caution when using the '-f' option.
-
- Verify Options:
- ================
- The verify process performs the following basic operations:
- - Start a local database server instance.
- - Restore the selected backup locally, watching for errors.
- - Run a table query on the restored database as a simple test to ensure tables were restored
- and queries against the database succeed without error.
- - Stop the local database server instance.
- - Delete the local database and configuration.
-
- -v <DatabaseSpec/>; in the form [<DatabaseType/>=]<Hostname/>/<DatabaseName/>, or [<DatabaseType/>=]<Hostname/>:<Port/>/<DatabaseName/>
-    where <DatabaseType/> defaults to the container database type if omitted
-      <DatabaseType/> must be one of "postgres" or "mongo"
-      <DatabaseType/> must be specified in a mixed database container project
-
- Triggers verify mode and starts the verification process on the specified database.
-
- Example:
- $0 -v postgresql=postgresql:5432/TheOrgBook_Database
- - Would start the verification process on the database using the most recent backup for the database.
-
- $0 -v all
- - Verify the most recent backup of all databases.
-
- -f <BackupFileFilter/>; an OPTIONAL filter to use to find/identify the backup file to restore.
- Refer to the same option under 'Restore Options' for details.
-
- Restore Options:
- ================
- The restore process performs the following basic operations:
- - Drop and recreate the selected database.
- - Grant the database user access to the recreated database.
- - Restore the database from the selected backup file.
-
- Have the 'Admin' (postgres or mongo) password handy; the script will ask you for it during the restore.
-
- When in restore mode, the script will list the settings it will use and wait for your confirmation to continue.
- This provides you with an opportunity to ensure you have selected the correct database and backup file
- for the job.
-
- Restore mode will allow you to restore a database to a different location (host, and/or database name) provided
- it can contact the host and you can provide the appropriate credentials. If you choose to do this, you will need
- to provide a file filter using the '-f' option, since the script will likely not be able to determine which backup
- file you would want to use. This functionality provides a convenient way to test your backups or migrate your
- database/data without affecting the original database.
-
- -r <DatabaseSpec/>; in the form [<DatabaseType/>=]<Hostname/>/<DatabaseName/>, or [<DatabaseType/>=]<Hostname/>:<Port/>/<DatabaseName/>
-    where <DatabaseType/> defaults to the container database type if omitted
-      <DatabaseType/> must be one of "postgres" or "mongo"
-      <DatabaseType/> must be specified in a mixed database container project
-
- Triggers restore mode and starts the restore process on the specified database.
-
- Example:
- $0 -r postgresql:5432/TheOrgBook_Database/postgres
- - Would start the restore process on the database using the most recent backup for the database.
-
- -f <BackupFileFilter/>; an OPTIONAL filter to use to find/identify the backup file to restore.
- This can be a full or partial file specification. When only part of a filename is specified the restore process
- attempts to find the most recent backup matching the filter.
- If not specified, the restore process attempts to locate the most recent backup file for the specified database.
-
- Examples:
- $0 -r postgresql=wallet-db/test_db/postgres -f wallet-db-tob_holder
- - Would try to find the latest backup matching on the partial file name provided.
-
- $0 -r wallet-db/test_db/postgres -f /backups/daily/2018-11-07/wallet-db-tob_holder_2018-11-07_23-59-35.sql.gz
- - Would use the specific backup file.
-
- $0 -r wallet-db/test_db/postgres -f wallet-db-tob_holder_2018-11-07_23-59-35.sql.gz
- - Would use the specific backup file regardless of its location in the root backup folder.
-
- -s OPTIONAL flag. Use with caution. Could cause unintentional data loss.
- Run the restore in scripted/scheduled mode. In this mode the restore will not ask you to confirm the settings,
- nor will it ask you for the 'Admin' password. It will simply attempt to restore a database from a backup.
- It's up to you to ensure it's targeting the correct database and using the correct backup file.
-
- -a <AdminPassword/>; an OPTIONAL flag used to specify the 'Admin' password.
- Use with the '-s' flag to specify the 'Admin' password. Under normal usage conditions it's better to supply the
- password when prompted so it is not visible on the console.
-
-EOF
-exit 1
-}
-# =================================================================================================================
\ No newline at end of file
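The usage text above distinguishes Cron Mode from Legacy Mode scheduling. For concreteness, cron-mode schedules are ordinary cron tab lines kept in ${BACKUP_CONF}; the two entries below mirror the schedule shown in docs/ExampleLog.md further down in this patch (the `default` field is passed through to go-crond):

```
# Sketch of cron-mode entries as they would appear in backup.conf.
# Daily backup at 01:00:
0 1 * * * default ./backup.sh -s
# Daily verification of the most recent backup of all databases at 04:00:
0 4 * * * default ./backup.sh -s -v all
```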
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/backup.utils b/openshift-v3/templates/backup-container-2.0.0/docker/backup.utils
deleted file mode 100644
index ed54af7d4..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/backup.utils
+++ /dev/null
@@ -1,268 +0,0 @@
-#!/bin/bash
-# =================================================================================================================
-# Primary Database Backup and Restore Functions:
-# -----------------------------------------------------------------------------------------------------------------
-function backupDatabase(){
- (
- _databaseSpec=${1}
- _fileName=${2}
-
- _backupFile="${_fileName}${IN_PROGRESS_BACKUP_FILE_EXTENSION}"
-
- touchBackupFile "${_backupFile}"
- onBackupDatabase "${_databaseSpec}" "${_backupFile}"
- _rtnCd=${?}
-
- if (( ${_rtnCd} != 0 )); then
- rm -rfvd ${_backupFile}
- fi
-
- return ${_rtnCd}
- )
-}
-
-function restoreDatabase(){
- (
- local OPTIND
- local quiet
- local localhost
- unset quiet
- unset localhost
- unset flags
- while getopts ql FLAG; do
- case $FLAG in
- q )
- quiet=1
- flags+="-${FLAG} "
- ;;
- * ) flags+="-${FLAG} ";;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
- _fileName=${2}
- _fileName=$(findBackup "${_databaseSpec}" "${_fileName}")
-
- if [ -z "${quiet}" ]; then
- echoBlue "\nRestoring database ..."
- echo -e "\nSettings:"
- echo "- Database: ${_databaseSpec}"
-
- if [ ! -z "${_fileName}" ]; then
- echo -e "- Backup file: ${_fileName}\n"
- else
- echoRed "- Backup file: No backup file found or specified. Cannot continue with the restore.\n"
- exit 1
- fi
- waitForAnyKey
- fi
-
- if [ -z "${quiet}" ] && [ -z "${_adminPassword}" ]; then
- # Ask for the Admin Password for the database, if it has not already been provided.
- _msg="Admin password (${_databaseSpec}):"
- _yellow='\033[1;33m'
- _nc='\033[0m' # No Color
- _message=$(echo -e "${_yellow}${_msg}${_nc}")
- read -r -s -p $"${_message}" _adminPassword
- echo -e "\n"
- fi
-
- local startTime=${SECONDS}
- onRestoreDatabase ${flags} "${_databaseSpec}" "${_fileName}" "${_adminPassword}"
- _rtnCd=${?}
-
- local duration=$(($SECONDS - $startTime))
- if (( ${_rtnCd} == 0 )); then
- echoGreen "\nRestore complete - Elapsed time: $(($duration/3600))h:$(($duration%3600/60))m:$(($duration%60))s\n"
- else
- echoRed "\nRestore failed.\n" >&2
- fi
-
- return ${_rtnCd}
- )
-}
-
-function runBackups(){
- (
- echoBlue "\nStarting backup process ..."
- databases=$(readConf)
- backupDir=$(createBackupFolder)
- listSettings "${backupDir}" "${databases}"
-
- for database in ${databases}; do
- if isForContainerType ${database}; then
- local startTime=${SECONDS}
- filename=$(generateFilename "${backupDir}" "${database}")
- backupDatabase "${database}" "${filename}"
- rtnCd=${?}
- local duration=$(($SECONDS - $startTime))
- local elapsedTime="\n\nElapsed time: $(($duration/3600))h:$(($duration%3600/60))m:$(($duration%60))s - Status Code: ${rtnCd}"
-
- if (( ${rtnCd} == 0 )); then
- backupPath=$(finalizeBackup "${filename}")
- dbSize=$(getDbSize "${database}")
- backupSize=$(getFileSize "${backupPath}")
- logInfo "Successfully backed up ${database}.\nBackup written to ${backupPath}.\nDatabase Size: ${dbSize}\nBackup Size: ${backupSize}${elapsedTime}"
- ftpBackup "${filename}"
- pruneBackups "${backupDir}" "${database}"
- else
- logError "Failed to backup ${database}.${elapsedTime}"
- fi
- fi
- done
-
- listExistingBackups ${ROOT_BACKUP_DIR}
- )
-}
-
-function startServer(){
- (
- # Start a local server instance ...
- onStartServer ${@}
-
- # Wait for server to start ...
- local startTime=${SECONDS}
- rtnCd=0
- printf "waiting for server to start"
- while ! pingDbServer ${@}; do
- printf "."
- local duration=$(($SECONDS - $startTime))
- if (( ${duration} >= ${DATABASE_SERVER_TIMEOUT} )); then
- echoRed "\nThe server failed to start within ${duration} seconds.\n"
- rtnCd=1
- break
- fi
- sleep 1
- done
- echo
- return ${rtnCd}
- )
-}
-
-function stopServer(){
- (
- onStopServer ${@}
- )
-}
-
-function pingDbServer(){
- (
- onPingDbServer ${@}
- return ${?}
- )
-}
-
-function verifyBackups(){
- (
- local OPTIND
- local flags
- unset flags
- while getopts q FLAG; do
- case $FLAG in
- * ) flags+="-${FLAG} " ;;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
- _fileName=${2}
- if [[ "${_databaseSpec}" == "all" ]]; then
- databases=$(readConf -q)
- else
- databases=${_databaseSpec}
- fi
-
- for database in ${databases}; do
- if isForContainerType ${database}; then
- verifyBackup ${flags} "${database}" "${_fileName}"
- fi
- done
- )
-}
-
-function verifyBackup(){
- (
- local OPTIND
- local quiet
- unset quiet
- while getopts q FLAG; do
- case $FLAG in
- q ) quiet=1 ;;
- esac
- done
- shift $((OPTIND-1))
-
- _databaseSpec=${1}
- _fileName=${2}
- _fileName=$(findBackup "${_databaseSpec}" "${_fileName}")
-
- echoBlue "\nVerifying backup ..."
- echo -e "\nSettings:"
- echo "- Database: ${_databaseSpec}"
-
- if [ ! -z "${_fileName}" ]; then
- echo -e "- Backup file: ${_fileName}\n"
- else
- echoRed "- Backup file: No backup file found or specified. Cannot continue with the backup verification.\n"
- exit 0
- fi
-
- if [ -z "${quiet}" ]; then
- waitForAnyKey
- fi
-
- local startTime=${SECONDS}
- startServer -l "${_databaseSpec}"
- rtnCd=${?}
-
- # Restore the database
- if (( ${rtnCd} == 0 )); then
- if [ -z "${quiet}" ]; then
- restoreDatabase -ql "${_databaseSpec}" "${_fileName}"
- rtnCd=${?}
- else
- # Filter out stdout, keep stderr
- echo "Restoring from backup ..."
- restoreLog=$(restoreDatabase -ql "${_databaseSpec}" "${_fileName}" 2>&1 >/dev/null)
- rtnCd=${?}
-
- if [ ! -z "${restoreLog}" ] && (( ${rtnCd} == 0 )); then
- echo ${restoreLog}
- unset restoreLog
- elif [ ! -z "${restoreLog}" ] && (( ${rtnCd} != 0 )); then
- restoreLog="\n\nThe following issues were encountered during backup verification;\n${restoreLog}"
- fi
- fi
- fi
-
- # Ensure there are tables in the database and general queries work
- if (( ${rtnCd} == 0 )); then
- verificationLog=$(onVerifyBackup "${_databaseSpec}")
- rtnCd=${?}
- fi
-
- # Stop the database server
- stopServer "${_databaseSpec}"
- local duration=$(($SECONDS - $startTime))
- local elapsedTime="\n\nElapsed time: $(($duration/3600))h:$(($duration%3600/60))m:$(($duration%60))s - Status Code: ${rtnCd}"
-
- if (( ${rtnCd} == 0 )); then
- logInfo "Successfully verified backup: ${_fileName}${verificationLog}${restoreLog}${elapsedTime}"
- else
- logError "Backup verification failed: ${_fileName}${verificationLog}${restoreLog}${elapsedTime}"
- fi
- return ${rtnCd}
- )
-}
-
-function getDbSize(){
- (
- size=$(onGetDbSize ${@})
- rtnCd=${?}
-
- echo ${size}
- return ${rtnCd}
- )
-}
-# =================================================================================================================
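Several of the functions removed above (restoreDatabase, verifyBackup, runBackups) share two idioms: a local getopts pass that collects a quiet flag before shifting to the positional arguments, and a SECONDS-based elapsed-time report. A self-contained sketch of both, with illustrative names:

```
#!/bin/bash
# Standalone sketch of two patterns from the deleted backup.utils:
# quiet-flag parsing via getopts, and SECONDS-based elapsed-time reporting.
function timedTask(){
  (
    local OPTIND quiet
    unset quiet
    while getopts q FLAG; do
      case ${FLAG} in
        q ) quiet=1 ;;
      esac
    done
    shift $((OPTIND-1))

    local startTime=${SECONDS}
    "${@}"                       # run the wrapped command
    local rtnCd=${?}

    local duration=$((SECONDS - startTime))
    if [ -z "${quiet}" ]; then
      echo "Elapsed time: $((duration/3600))h:$((duration%3600/60))m:$((duration%60))s - Status Code: ${rtnCd}"
    fi
    return ${rtnCd}
  )
}

timedTask sleep 2      # prints the elapsed-time line
timedTask -q sleep 1   # the -q flag suppresses it
```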
diff --git a/openshift-v3/templates/backup-container-2.0.0/docker/webhook-template.json b/openshift-v3/templates/backup-container-2.0.0/docker/webhook-template.json
deleted file mode 100644
index 1c2c75960..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docker/webhook-template.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "projectFriendlyName": "${projectFriendlyName}",
- "projectName": "${projectName}",
- "statusCode": "${statusCode}",
- "message": "${message}"
-}
\ No newline at end of file
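webhook-template.json above is a payload template whose ${...} placeholders are filled in at send time; in the container that is handled by the deleted backup.logging. A hedged illustration of the substitution using envsubst and curl rather than the container's own code (WEBHOOK_URL and the sample values are assumptions for this sketch):

```
#!/bin/bash
# Hedged sketch: fill webhook-template.json's ${...} placeholders and POST it.
# The real container does this in backup.logging; envsubst/curl are used here
# purely for illustration. WEBHOOK_URL and the values below are assumptions.
export projectFriendlyName="ZEVA" \
       projectName="zeva-prod" \
       statusCode="0" \
       message="Backup completed"

payload=$(envsubst < webhook-template.json)

curl -s -X POST -H "Content-Type: application/json" \
     -d "${payload}" "${WEBHOOK_URL}"
```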
diff --git a/openshift-v3/templates/backup-container-2.0.0/docs/ExampleLog.md b/openshift-v3/templates/backup-container-2.0.0/docs/ExampleLog.md
deleted file mode 100644
index c769467f1..000000000
--- a/openshift-v3/templates/backup-container-2.0.0/docs/ExampleLog.md
+++ /dev/null
@@ -1,62 +0,0 @@
-
-## An example of the backup container in action
-```
-Starting backup process ...
-Reading backup config from backup.conf ...
-Making backup directory /backups/daily/2020-02-28/ ...
-
-Settings:
-- Run mode: scheduled
-
-- Backup strategy: rolling
-- Current backup type: daily
-- Backups to retain:
- - Daily: 6
- - Weekly: 4
- - Monthly: 1
-- Current backup folder: /backups/daily/2020-02-28/
-- Time Zone: PST -0800
-
-- Schedule:
- - 0 1 * * * default ./backup.sh -s
- - 0 4 * * * default ./backup.sh -s -v all
-
-- Container Type: mongo
-- Databases (filtered by container type):
- - mongo=identity-kit-db-bc/identity_kit_db
-
-- FTP server: not configured
-- Webhook Endpoint: https://chat.[ocp name].gov.bc.ca/hooks/***
-- Environment Friendly Name: Verifiable Organizations Network (mongo-test)
-- Environment Name (Id): devex-von-test
-
-Backing up 'identity-kit-db-bc/identity_kit_db' to '/backups/daily/2020-02-28/identity-kit-db-bc-identity_kit_db_2020-02-28_08-07-10.sql.gz.in_progress' ...
-Successfully backed up mongo=identity-kit-db-bc/identity_kit_db.
-Backup written to /backups/daily/2020-02-28/identity-kit-db-bc-identity_kit_db_2020-02-28_08-07-10.sql.gz.
-Database Size: 1073741824
-Backup Size: 4.0K
-
-Elapsed time: 0h:0m:0s - Status Code: 0
-
-================================================================================================================================
-Current Backups:
-
-Database Current Size
-mongo=identity-kit-db-bc/identity_kit_db 1073741824
-
-Filesystem Size Used Avail Use% Mounted on
-192.168.111.90:/trident_qtree_pool_file_standard_WKDMGDWTSQ/file_standard_devex_von_test_backup_mongo_54218 1.0G 0 1.0G 0% /backups
---------------------------------------------------------------------------------------------------------------------------------
-4.0K 2020-02-27 13:26 /backups/daily/2020-02-27/identity-kit-db-bc-identity_kit_db_2020-02-27_13-26-21.sql.gz
-4.0K 2020-02-27 13:27 /backups/daily/2020-02-27/identity-kit-db-bc-identity_kit_db_2020-02-27_13-27-10.sql.gz
-12K 2020-02-27 13:27 /backups/daily/2020-02-27
-4.0K 2020-02-28 06:44 /backups/daily/2020-02-28/identity-kit-db-bc-identity_kit_db_2020-02-28_06-44-19.sql.gz
-4.0K 2020-02-28 07:12 /backups/daily/2020-02-28/identity-kit-db-bc-identity_kit_db_2020-02-28_07-12-29.sql.gz
-4.0K 2020-02-28 08:07 /backups/daily/2020-02-28/identity-kit-db-bc-identity_kit_db_2020-02-28_08-07-10.sql.gz
-16K 2020-02-28 08:07 /backups/daily/2020-02-28
-32K 2020-02-28 08:07 /backups/daily
-36K 2020-02-28 08:07 /backups/
-================================================================================================================================
-
-Scheduled backup run complete.
-```
\ No newline at end of file
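The retention reported in this log (rolling strategy; 6 daily, 4 weekly, 1 monthly backups) is driven by the container's environment. A hedged sketch of the corresponding settings, named per the backup-container README that is also removed in this patch; treat the variable names as assumptions here:

```
# Assumed environment settings matching the retention shown in the log above.
BACKUP_STRATEGY=rolling
DAILY_BACKUPS=6
WEEKLY_BACKUPS=4
MONTHLY_BACKUPS=1
```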
diff --git a/openshift-v3/templates/backup-container-2.0.0/docs/SampleRocketChatErrorMessage.png b/openshift-v3/templates/backup-container-2.0.0/docs/SampleRocketChatErrorMessage.png
deleted file mode 100644
index dfeb8203595fa82e938d598a6f1407bbbfc034d4..0000000000000000000000000000000000000000
Binary files a/openshift-v3/templates/backup-container-2.0.0/docs/SampleRocketChatErrorMessage.png and /dev/null differ