diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 00000000000..cf5f9b3b4ca
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,159 @@
+version: 2.1
+
+jobs:
+  build:
+
+    docker:
+      - image: circleci/buildpack-deps:trusty
+
+    parameters:
+      load_docker_cache:
+        type: boolean
+      save_docker_cache:
+        type: boolean
+      test_suite:
+        type: string
+        default: ""
+
+    steps:
+
+      - checkout
+
+      - setup_remote_docker
+
+      - when:
+          condition: << parameters.load_docker_cache >>
+          steps:
+            - restore_cache:
+                keys:
+                  - v1-docker-images-{{ .Branch }}
+            - run:
+                name: Load Docker layers cache
+                command: |
+                  docker load -i ~/docker-layers-cache.tar || true
+
+      - run:
+          name: Build the stack
+          command: docker-compose -f docker-compose.yml build
+          working_directory: scripts/spcgeonode/
+
+      - when:
+          condition: << parameters.save_docker_cache >>
+          steps:
+            - run:
+                name: Save Docker layers cache
+                command: |
+                  rm -f ~/docker-layers-cache.tar
+                  docker save -o ~/docker-layers-cache.tar $(docker images -a --format "{{.ID}}")
+                when: always
+            - save_cache:
+                key: v1-docker-images-{{ .Branch }}-{{ epoch }}
+                paths:
+                  - ~/docker-layers-cache.tar
+                when: always
+
+      - run:
+          name: Start the stack
+          command: docker-compose -f docker-compose.yml up -d --build
+          working_directory: scripts/spcgeonode/
+
+      - run:
+          name: Wait for everything to start...
+          command: |
+            n=1
+            m=60
+            until [ $n -gt $m ]
+            do
+              sleep 60
+              DJANGO_STATUS=$(docker inspect --format="{{json .State.Health.Status}}" spcgeonode_django_1)
+              GEOSERVER_STATUS=$(docker inspect --format="{{json .State.Health.Status}}" spcgeonode_geoserver_1)
+              echo ""
+              echo "Waited $n min (out of $m min)"
+              if [[ $DJANGO_STATUS == '"healthy"' ]] && [[ $GEOSERVER_STATUS == '"healthy"' ]]; then
+                break
+              fi
+              echo "Not healthy yet..."
+              docker ps
+              n=$[$n+1]
+            done
+            [[ $DJANGO_STATUS == '"healthy"' ]] && [[ $GEOSERVER_STATUS == '"healthy"' ]];
+
+      - run:
+          name: Show state (debug)
+          command: docker ps
+          when: on_fail
+
+      - run:
+          name: Geoserver logs (debug)
+          command: docker logs spcgeonode_geoserver_1 --tail 500
+          when: on_fail
+
+      - run:
+          name: Django logs (debug)
+          command: docker logs spcgeonode_django_1 --tail 500
+          when: on_fail
+
+      - when:
+          condition: << parameters.test_suite >>
+          steps:
+            - run:
+                name: Run test suite
+                command: |
+                  docker-compose -f docker-compose.yml exec django bash -c 'echo "Running << parameters.test_suite >>"'
+                  docker-compose -f docker-compose.yml exec postgres psql -U postgres -c 'SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid <> pg_backend_pid();'
+                  docker-compose -f docker-compose.yml exec postgres createdb -U postgres -T postgres test_postgres
+                  docker-compose -f docker-compose.yml run --rm django bash -c 'ASYNC_SIGNALS=False python manage.py test -v 3 --keepdb << parameters.test_suite >>'
+                working_directory: scripts/spcgeonode/
+
+workflows:
+
+  commit:
+    jobs:
+      - build:
+          name: setup
+          load_docker_cache: true
+          save_docker_cache: true
+          test_suite: 'geonode.tests.smoke'
+      - build:
+          name: tests_integration
+          load_docker_cache: true
+          save_docker_cache: false
+          test_suite: 'geonode.tests.integration'
+          requires:
+            - setup
+      - build:
+          name: tests_core
+          load_docker_cache: true
+          save_docker_cache: false
+          test_suite: $(python -c "import sys;from geonode import settings;sys.stdout.write('\'' '\''.join([a+'\''.tests'\'' for a in settings.GEONODE_CORE_APPS]))")
+          requires:
+            - setup
+      - build:
+          name: tests_internal
+          load_docker_cache: true
+          save_docker_cache: false
+          test_suite: $(python -c "import sys;from geonode import settings;sys.stdout.write('\'' '\''.join([a+'\''.tests'\'' for a in
settings.GEONODE_INTERNAL_APPS]))") + requires: + - setup + - build: + name: tests_contrib + load_docker_cache: true + save_docker_cache: false + test_suite: $(python -c "import sys;from geonode import settings;sys.stdout.write('\'' '\''.join([a+'\''.tests'\'' for a in settings.GEONODE_CONTRIB_APPS]))") + requires: + - setup + + nightly: + triggers: + - schedule: + cron: "0 0 * * *" + filters: + branches: + only: + - spcgeonode-release + - spcgeonode + jobs: + - build: + load_docker_cache: false + save_docker_cache: true + test_suite: geonode.tests.smoke geonode.tests.integration $(python -c "import sys;from geonode import settings;sys.stdout.write('\'' '\''.join([a+'\''.tests'\'' for a in settings.GEONODE_APPS]))") diff --git a/.dockerignore b/.dockerignore index b5946b0c18c..ce2c0518645 100644 --- a/.dockerignore +++ b/.dockerignore @@ -7,3 +7,5 @@ geonode/static/node_modules docs .coverage .celerybeat-* + +scripts/spcgeonode/_volume_* diff --git a/.gitignore b/.gitignore index e397e519642..ee58f225b14 100644 --- a/.gitignore +++ b/.gitignore @@ -84,3 +84,5 @@ geonode\.tests\.bdd\.e2e\.test_login/ /celerybeat.pid /celerybeat-schedule + +scripts/spcgeonode/_volume_* diff --git a/scripts/spcgeonode/.dockerignore b/scripts/spcgeonode/.dockerignore new file mode 100644 index 00000000000..11ea77af019 --- /dev/null +++ b/scripts/spcgeonode/.dockerignore @@ -0,0 +1,7 @@ +*.pyc +Thumbs.db +_volume_* +_service_* +*~ +celerybeat-schedule +celeryev.pid diff --git a/scripts/spcgeonode/.env b/scripts/spcgeonode/.env new file mode 100644 index 00000000000..0f32f40007f --- /dev/null +++ b/scripts/spcgeonode/.env @@ -0,0 +1,49 @@ +############################################################## +# # +# SPCgeonode Settings # +# # +# The defauts settings are suited for testing on localhost. # +# If you're deploying SPCgeonode for production, you need to # +# adapt the following settings # +# # +# DO NOT FORGET to also modify values in _secrets ! # +# # +############################################################## + +# Name of the setup (you only need to change this if you run several instances of the stack) +COMPOSE_PROJECT_NAME=spcgeonode + +# IP or domain name and port where the server can be reached on HTTPS (leave HOST empty if you want to use HTTP only) +HTTPS_HOST= +HTTPS_PORT=443 + +# IP or domain name and port where the server can be reached on HTTP (leave HOST empty if you want to use HTTPS only) +HTTP_HOST=127.0.0.1 +HTTP_PORT=80 + +# Email where alters should be sent. This will be used by let's encrypt and as the django admin email. +ADMIN_USERNAME=super +ADMIN_PASSWORD=duper +ADMIN_EMAIL=admin@example.com + +# Django secret key (replace this by any complex and random string) +SECRET_KEY=1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ + +# Let's Encrypt certificates for https encryption. You must have a domain name as HTTPS_HOST (doesn't work +# with an ip) and it must be reachable from the outside. 
+# This can be one of the following :
+# disabled : we do not get a certificate at all (a placeholder certificate will be used)
+# staging : we get staging certificates (these are invalid, but allow testing the whole process and have much higher rate limits)
+# production : we get a normal certificate (default)
+LETSENCRYPT_MODE=disabled
+
+# Choose from https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+TIME_ZONE=Pacific/Fiji
+
+# Whether users should be able to create accounts themselves
+REGISTRATION_OPEN=True
+
+# Rclone backup configuration for Amazon S3 (leave empty if you don't want to use S3)
+S3_ACCESS_KEY=
+S3_SECRET_KEY=
+S3_REGION=
+S3_BUCKET=
diff --git a/scripts/spcgeonode/CHANGELOG.md b/scripts/spcgeonode/CHANGELOG.md
new file mode 100644
index 00000000000..01793da1ed7
--- /dev/null
+++ b/scripts/spcgeonode/CHANGELOG.md
@@ -0,0 +1,109 @@
+# Changelog
+
+## Version 2.x
+
+### 2.10rc4.1
+
+- use env vars instead of secrets (the small benefit in terms of security was not worth the complexity)
+- hardcode the rclone S3 configuration (other providers would have to be hardcoded too)
+- complete CI tests
+
+### 2.10rc4.0
+
+- adopted Geonode's version numbering (with an additional level for subreleases)
+- moved the setup to the main Geonode repo under `scripts/spcgeonode`, which makes it easier to use as a development setup for Geonode
+- use CircleCI (mostly to avoid interfering with the existing travis setup)
+
+## Version 0.1.x (Geonode 2.10)
+
+**WARNING** YOU CANNOT UPGRADE FROM 0.0.x to 0.1.x
+YOU NEED TO DO A FRESH INSTALL AND MANUALLY TRANSFER THE DATA
+
+### 0.1.1
+
+- improved nginx config (gzip and expiration header)
+
+### 0.1.0
+
+- targeting the future 2.10
+- removed the elastic search container (it was unused anyway)
+- removed the postgres login hack, using the Geonode-Geoserver OAuth mechanism instead
+- prebuilt geodatadir used again and master password procedure simplified
+- added django healthcheck
+- if https is enabled, force redirection to the https host (as geonode doesn't support multiple domain names/relative installs)
+- django secret generated automatically
+
+## Version 0.0.x (Geonode 2.6)
+
+### 0.0.25
+
+- undo admin users disabled again
+- revert using the 2.6.x branch (because of a side effect - login taking ages)
+
+### 0.0.24
+
+- use Geonode's Geoserver .war build instead of starting from vanilla
+- fix thumbnail generation (uses a custom release of Geonode)
+- django admin users are again disabled on restart (so we can keep only 1 superuser)
+- added a travis integration test (deploys django, then tries to create a user, upload a layer, get the thumbnail and get a tile of the layer)
+- changed the rclone configuration (you must now provide an rclone conf file)
+- removed Syncthing
+- make the http(s) ports configurable in case a port is already busy
+
+### 0.0.23
+
+- various fixes (broken pip dependencies, wrong fix for the geoserver proxy, ssl certificate refreshing)
+
+### 0.0.22
+
+- siteurl set using HTTPS_HOST or HTTP_HOST (instead of "/" which isn't supported)
+
+### 0.0.21
+
+- use a custom build of geonode (with some fixes not upstreamed yet)
+
+### 0.0.18
+
+- geoserver master password reset is cleaner (programmatically reset the password from the initial datadir before the first launch)
+- support empty HTTP_HOST or HTTPS_HOST
+- geoserver 2.12.1 => 2.12.2
+- cleaned up env vars
+- upgrade should work
+
+### 0.0.17
+
+- improve nginx<->letsencrypt (nginx can work without the letsencrypt service)
+
+### 0.0.16
+
+- put django in the main directory (so it's clearer for deploy builds)
+
+### 0.0.15
+
+- removed the rancher template from the repo
+- removed entrypoints and command from the django image, to work around what looks like a bug in rancher where an empty entrypoint in docker-compose isn't taken into account
+
+### 0.0.11
+
+- added a second backup service using RClone (the idea is to test both Syncthing and rclone, then choose one)
+
+### 0.0.10
+
+- we don't rely on an initial geodatadir anymore, instead we start from scratch, launch geoserver once, then do our modifications
+- added a backup service using Syncthing
+
+### 0.0.9
+
+- fix bug with the rancher resolver on rancher
+
+### 0.0.8
+
+- allow disabling/testing let's encrypt using env variables
+- we use the geonode users/groups tables directly for geoserver's authentication
+
+### 0.0.7
+
+- have ssl working online
+- use env variables / secrets where applicable
+- publish on git and autobuild images
+- make docker deploy work again
diff --git a/scripts/spcgeonode/README.md b/scripts/spcgeonode/README.md
new file mode 100644
index 00000000000..265655eefd9
--- /dev/null
+++ b/scripts/spcgeonode/README.md
@@ -0,0 +1,226 @@
+# SPCgeonode [![CircleCI](https://circleci.com/gh/olivierdalang/geonode.svg?style=svg)](https://circleci.com/gh/olivierdalang/geonode)
+
+SPCgeonode is a setup for Geonode deployment at SPC. It makes it easy to deploy a production-ready Geonode. The setup aims for simplicity over flexibility, so it only targets typical small-scale Geonode installations.
+
+The setup is also usable for Geonode development or customization.
+
+
+## Prerequisites
+
+Make sure you have a recent version of Docker (tested with 17.12) and docker-compose.
+
+- Linux : https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-from-a-package and https://docs.docker.com/compose/install/#install-compose
+- Windows : https://store.docker.com/editions/community/docker-ce-desktop-windows
+- Mac : https://store.docker.com/editions/community/docker-ce-desktop-mac
+
+## Usage
+
+All the following commands are run from this folder :
+
+```
+cd /path/to/geonode/scripts/spcgeonode
+```
+
+### Development
+
+To start only the main services (should be enough for development) :
+```
+docker-compose up --build -d django geoserver postgres nginx
+```
+
+To start the whole stack :
+```
+docker-compose up --build -d
+```
+
+If you are not familiar with Docker, read below to learn how to see what's happening. On first start, the containers will restart several times. Once everything has started, you should be able to open http://127.0.0.1 in your browser. See below for how to edit the configuration if you install on another computer.
+
+### Production (using docker-compose)
+
+Using a text editor, edit the `.env` file (you can also override these values with environment variables; an example is shown at the end of this section) :
+```
+# General configuration
+nano .env
+```
+
+When ready, start the stack using these commands :
+```
+# Run the stack
+docker-compose -f docker-compose.yml up -d --build
+```
+
+Alternatively, you can pull the images from Docker Hub instead of rebuilding (only applies if you haven't changed the docker setup) :
+```
+# Pull the images and run the stack
+docker-compose -f docker-compose.yml pull
+docker-compose -f docker-compose.yml up -d
+```
+
+If you are not familiar with Docker, read below to learn how to see what's happening. On first start, the containers will restart several times. Once everything has started, you should be able to open http://your_http_host or https://your_https_host in your browser.
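+
+For reference, a minimal production-oriented `.env` could look like the following. All values below are placeholders to adapt to your own deployment; the variable names are the ones defined in the `.env` file of this setup :
+```
+# Host names (leave HTTPS_HOST empty to serve plain HTTP only, or HTTP_HOST empty to serve HTTPS only)
+HTTPS_HOST=geonode.example.org
+HTTPS_PORT=443
+HTTP_HOST=
+HTTP_PORT=80
+
+# Admin account (the email is also used as the Let's Encrypt contact)
+ADMIN_USERNAME=admin
+ADMIN_PASSWORD=change-me
+ADMIN_EMAIL=admin@example.org
+
+# Replace with a long random string
+SECRET_KEY=change-me-to-a-long-random-string
+
+# Request real certificates from Let's Encrypt
+LETSENCRYPT_MODE=production
+```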
+
+### Upgrade
+
+If at some point you want to update the SPCgeonode setup (this will only work if you didn't make modifications; if you did, you need to merge them) :
+```
+# Get the updated setup
+git pull
+
+# Upgrade the stack
+docker-compose -f docker-compose.yml up -d --build
+```
+
+### Development vs Production
+
+Differences between the dev setup and the prod setup:
+
+- Django source is mounted on the host and uwsgi does live reload (so that edits to the python code are reloaded live)
+- Django's static and media folders, Geoserver's data folder and the certificates folder are mounted on the host (just to easily see what's happening)
+- Django debug is set to True
+- Postgres's port 5432 is exposed (to allow debugging using pgadmin)
+- Nginx debug mode is activated (not really sure what this changes)
+- Docker tags are set to dev instead of latest
+
+### Releases
+
+
+To make a release :
+
+- checkout spcgeonode-release
+- merge spcgeonode
+- replace the version tag in docker-compose.yml with the version (format `x.x.x`)
+- commit
+- create a git tag (format `spc/x.x.x`)
+- push spcgeonode-release with tags
+
+This will trigger an automatic build on Docker Hub.
+
+If you need to manually publish the images (e.g. if the Docker Hub build fails) :
+
+```
+docker login
+docker-compose -f docker-compose.yml build
+docker-compose -f docker-compose.yml push
+```
+
+## FAQ
+
+### Docker primer - How to see what's happening ?
+
+If you are not familiar with Docker, here are some useful commands :
+
+- `docker ps` : list all containers and their status
+- `docker-compose logs -f` : show live stdout from all containers
+- `docker-compose logs -f django` : show live stdout from a specific container (replace `django` with `geoserver`, `postgres`, etc.)
+- `docker-compose down -v` : brings the stack down including volumes, allowing you to restart from scratch **THIS WILL ERASE ALL DATA !!**
+
+### During startup, a lot of containers crash and restart, is this normal ?
+
+This is the normal startup process. Due to the nature of the setup, the containers are very interdependent. Startup from scratch can take approx. 5-10 minutes, during which all containers may restart a lot of times.
+
+In short, Django will restart until Postgres is up so it can migrate the database. Geoserver will restart until Django has configured OAuth so it can get its OAuth2 configuration. Django will restart until Geoserver is running so it can reinitialize the master password.
+
+### Backups
+
+*Backups* are made using [RClone](https://rclone.org/docs/). RClone is a flexible file syncing tool that supports all common cloud providers, regular file transfer protocols as well as the local filesystem. It should be able to accommodate almost any setup.
+
+The only configuration provided with the setup assumes Amazon S3 is being used, in which case you need to replace the following parts of the `rclone.backup.config` file : `YOUR_S3_ACCESS_KEY_HERE`, `YOUR_S3_SECRET_KEY_HERE`, `YOUR_S3_REGION_HERE` and `THE_NAME_OF_YOUR_BUCKET_HERE` (watch [this](https://www.youtube.com/watch?v=BLTy2tQXQLY) to learn how to get these keys). An example of the resulting configuration is shown at the end of this section.
+
+Also consider enabling *versioning* on the bucket, so that data won't get lost if it is accidentally deleted in GeoNode.
+
+If you want to set up backups using another provider, check the [RClone documentation](https://rclone.org/docs/). It should be easy to add any RClone-supported provider to SPCgeonode.
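+
+As an illustration, assuming the S3 variant shipped with this setup, the rclone configuration with the placeholders filled in would look roughly like this. The values below are fake; in the current setup the same file is generated from the `S3_*` variables of `.env` via the `rclone/rclone.s3.conf.envsubst` template :
+```
+[spcgeonode_base]
+type = s3
+acl = private
+access_key_id = AKIAIOSFODNN7EXAMPLE
+secret_access_key = wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+region = ap-southeast-2
+env_auth = false
+
+[spcgeonode]
+type = alias
+remote = spcgeonode_base:my-geonode-backups
+```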
+ +### How to migrate from an existing standard Geonode install + +This section lists the steps done to migrate from an apt-get install of Geonode 2.4.1 (with Geoserver 2.7.4) to a fresh SPCGeonode 0.1 install. It is meant as a guide only as some steps may need some tweaking depending on your installation. Do not follow these steps if you don't understand what you're doing. + +#### Prerequisites + +- access to the original server +- a new server for the install (can be the same than the first one if you don’t fear losing all data) - ideally linux but should be OK as long as it runs docker (64bits) +- an external hard-drive to copy data over + +#### On the old server + +``` +# Move to the external hard drive +cd /path/to/your/external/drive + +# Find the current database password (look for DATABASE_PASSWORD, in my case it was XbFAyE4w) +more /etc/geonode/local_settings.py + +# Dump the database content (you will be prompted several time for the password above) +pg_dumpall --host=127.0.0.1 --username=geonode --file=pg_dumpall.custom + +# Copy all uploaded files +cp -r /var/www/geonode/uploaded uploaded + +# Copy geoserver data directory +cp -r /usr/share/geoserver/data geodatadir +``` + +#### On the new server + +Setup SPCGeonode by following the prerequisite and production steps on https://github.com/olivierdalang/SPCgeonode/tree/release up to (but not including) run the stack. + +Then run these commands : + +``` +# Prepare the stack (without running) +docker-compose -f docker-compose.yml pull --no-parallel +docker-compose -f docker-compose.yml up --no-start + +# Start the database +docker-compose -f docker-compose.yml up -d postgres + +# Initialize geoserver (to create the geodatadir - this will fail because Django/Postgres arent started yet - but this is expected) +docker-compose -f docker-compose.yml run --rm geoserver exit 0 + +# Go to the external drive +cd /path/to/drive/ + +# Restore the dump (this can take a while if you have data in postgres) +cat pg_dumpall.custom | docker exec -i spcgeonode_postgres_1 psql -U postgres +# Rename the database to postgres +docker exec -i spcgeonode_postgres_1 dropdb -U postgres postgres +docker exec -i spcgeonode_postgres_1 psql -d template1 -U postgres -c "ALTER DATABASE geonode RENAME TO postgres;" + +# Restore the django uploaded files +docker cp uploaded/. spcgeonode_django_1:/spcgeonode-media/ + +# Restore the workspaces and styles of the geoserver data directory +docker cp geodatadir/styles/. spcgeonode_geoserver_1:/spcgeonode-geodatadir/styles +docker cp geodatadir/workspaces/. spcgeonode_geoserver_1:/spcgeonode-geodatadir/workspaces +docker cp geodatadir/data/. 
spcgeonode_geoserver_1:/spcgeonode-geodatadir/data + +# Back to SPCgeonode +cd /path/to/SPCgeonode + +# Fix some inconsistency that prevents migrations (public.layers_layer shouldn’t have service_id column) +docker exec -i spcgeonode_postgres_1 psql -U postgres -c "ALTER TABLE public.layers_layer DROP COLUMN service_id;" + +# Migrate with fake initial +docker-compose -f docker-compose.yml run --rm --entrypoint "python manage.py migrate --fake-initial" django + +# Create the SQL diff to fix the schema # TODO : upstream some changes to django-extensions for this to work directly +docker-compose -f docker-compose.yml run --rm --entrypoint '/bin/sh -c "DJANGO_COLORS=nocolor python manage.py sqldiff -ae"' django >> fix.sql + +# Manually fix the SQL command until it runs (you can also drop the tables that have no model) +nano fix.sql + +# Apply the SQL diff (review the sql file first as this may delete some important tables) +cat fix.sql | docker exec -i spcgeonode_postgres_1 psql -U postgres + +# Set all layers as approved +docker exec -i spcgeonode_postgres_1 psql -U postgres -c 'UPDATE base_resourcebase SET is_approved = TRUE;' + +# This time start the stack +docker-compose -f docker-compose.yml up -d +``` + +One last step was to connect to the GeoServer administration and change the PostGIS store host, user and password to 'postgres'. + +### On windows, I have error like `standard_init_linux.go:190: exec user process caused "no such file or directory"` + +This may be due to line endings. When checking out files, git optionnaly converts line endings to match the platform, which doesn't work well it `.sh` files. + +To fix, use `git config --global core.autocrlf false` and checkout again. diff --git a/scripts/spcgeonode/ROADMAP.md b/scripts/spcgeonode/ROADMAP.md new file mode 100644 index 00000000000..0f8f1fd4809 --- /dev/null +++ b/scripts/spcgeonode/ROADMAP.md @@ -0,0 +1,24 @@ +# Roadmap + +## Before merging to master + +- CRITICAL : change rest.properties config +- CRITICAL : see if Geoserver authkey tokens expire (even when the key is deleted from the database, it's still possible to use it until manually clicking "sync user/group service". It looks like it's some cache, but I don't know if it expires. Maybe we need to use webservice instead of user property...) +- fix updatelayerip on startup (currently creates a mess in links when host/port changes and deletes custom thumbnails) +- make monitoring module work (currently it's disabled because of some exception during startup) +- move the README to the documentation +- move this roadmap to github issues + +## Eventually + +- check if everything is ok with auth_keys (it seems Geonode uses expired keys...) +- tweak nginx settings (gzip output, cache, etc...) 
+- use alpine for django as well +- migrate to spc repositories instead of olivierdalang +- see if we can have geoserver exit on error, in not at least implement proper healtcheck +- keep a version marker in the geodatadir directory in case of updates to the datadir +- set more reasonable logging for geoserver +- add at least some basic integration test to travis +- see if we can setup something for backups on local filesystem +- serve static files from django directly rather than from nginx when developping (to see changes without collectstatic) +- add a service to run grunt tasks when developping diff --git a/scripts/spcgeonode/django/Dockerfile b/scripts/spcgeonode/django/Dockerfile new file mode 100644 index 00000000000..1dcb363698c --- /dev/null +++ b/scripts/spcgeonode/django/Dockerfile @@ -0,0 +1,51 @@ +# TODO : use python:2.7.13-alpine3.6 to make this lighter ( it is what we use for letsencryipt as well) +# But it seems it's not possible for now because alpine only has geos 3.6 which is not supported by django 1.8 +# (probably because of https://code.djangoproject.com/ticket/28441) + +FROM python:2.7.14-slim-stretch + +# Install system dependencies +RUN echo "Updating apt-get" && \ + apt-get update && \ + echo "Installing build dependencies" && \ + apt-get install -y gcc make libc-dev musl-dev libpcre3 libpcre3-dev g++ && \ + echo "Installing Pillow dependencies" && \ + # RUN apt-get install -y NOTHING ?? It was probably added in other packages... ALPINE needed jpeg-dev zlib-dev && \ + echo "Installing GDAL dependencies" && \ + apt-get install -y libgeos-dev libgdal-dev && \ + echo "Installing Psycopg2 dependencies" && \ + # RUN apt-get install -y NOTHING ?? It was probably added in other packages... ALPINE needed postgresql-dev && \ + echo "Installing other dependencies" && \ + apt-get install -y libxml2-dev libxslt-dev && \ + echo "Installing GeoIP dependencies" && \ + apt-get install -y geoip-bin geoip-database && \ + echo "Installing healthceck dependencies" && \ + apt-get install -y curl && \ + echo "Python server" && \ + pip install uwsgi && \ + echo "Removing build dependencies and cleaning up" && \ + # TODO : cleanup apt-get with something like apt-get -y --purge autoremove gcc make libc-dev musl-dev libpcre3 libpcre3-dev g++ && \ + rm -rf /var/lib/apt/lists/* && \ + rm -rf ~/.cache/pip + +# Install python dependencies +RUN echo "Geonode python dependencies" +RUN pip install pygdal==$(gdal-config --version).* +RUN pip install celery==4.1.0 # see https://github.com/GeoNode/geonode/pull/3714 + +# Install geonode dependencies +ADD requirements.txt /requirements.txt +RUN pip install -r requirements.txt +RUN rm requirements.txt + +# Install geonode +RUN mkdir /spcgeonode +WORKDIR /spcgeonode/ +ADD . /spcgeonode/ +RUN pip install -e . 
+RUN chmod +x scripts/spcgeonode/django/docker-entrypoint.sh + +# Export ports +EXPOSE 8000 + +# We provide no command or entrypoint as this image can be used to serve the django project or run celery tasks diff --git a/scripts/spcgeonode/django/docker-entrypoint.sh b/scripts/spcgeonode/django/docker-entrypoint.sh new file mode 100755 index 00000000000..ec7ba2fe168 --- /dev/null +++ b/scripts/spcgeonode/django/docker-entrypoint.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +# Exit script in case of error +set -e + +echo $"\n\n\n" +echo "-----------------------------------------------------" +echo "STARTING DJANGO ENTRYPOINT $(date)" +echo "-----------------------------------------------------" + +# Setting dynamic env vars (some of this could probably be put in docker-compose once +# https://github.com/docker/compose/pull/5268 is merged, or even better hardcoded if +# geonode supported relative site urls) +if [ ! -z "$HTTPS_HOST" ]; then + export SITEURL="https://$HTTPS_HOST" + if [ "$HTTPS_PORT" != "443" ]; then + SITEURL="$SITEURL:$HTTPS_PORT" + fi +else + export SITEURL="http://$HTTP_HOST" + if [ "$HTTP_PORT" != "80" ]; then + SITEURL="$SITEURL:$HTTP_PORT" + fi +fi + +export GEOSERVER_PUBLIC_LOCATION="${SITEURL}/geoserver/" + +# Run migrations +echo 'Running initialize.py...' +python -u scripts/spcgeonode/django/initialize.py + +echo "-----------------------------------------------------" +echo "FINISHED DJANGO ENTRYPOINT --------------------------" +echo "-----------------------------------------------------" + +# Run the CMD +exec "$@" diff --git a/scripts/spcgeonode/django/initialize.py b/scripts/spcgeonode/django/initialize.py new file mode 100644 index 00000000000..eaa77ef8c83 --- /dev/null +++ b/scripts/spcgeonode/django/initialize.py @@ -0,0 +1,133 @@ +""" +This script initializes Geonode +""" + +######################################################### +# Setting up the context +######################################################### + +import os, requests, json, uuid, django +django.setup() + +######################################################### +# Imports +######################################################### + +from django.core.management import call_command +from geonode.people.models import Profile +from oauth2_provider.models import Application +from django.conf import settings + +# Getting the secrets +admin_username = os.getenv('ADMIN_USERNAME') +admin_password = os.getenv('ADMIN_PASSWORD') +admin_email = os.getenv('ADMIN_EMAIL') + + +######################################################### +# 1. Running the migrations +######################################################### + +print("-----------------------------------------------------") +print("1. Running the migrations") +call_command('migrate', '--noinput') + + +######################################################### +# 2. Creating superuser if it doesn't exist +######################################################### + +print("-----------------------------------------------------") +print("2. Creating/updating superuser") +try: + superuser = Profile.objects.get(username=admin_username) + superuser.set_password(admin_password) + superuser.is_active = True + superuser.email = admin_email + superuser.save() + print('superuser successfully updated') +except Profile.DoesNotExist: + superuser = Profile.objects.create_superuser( + admin_username, + admin_email, + admin_password + ) + print('superuser successfully created') + + +######################################################### +# 3. 
Create an OAuth2 provider to use authorisations keys +######################################################### + +print("-----------------------------------------------------") +print("3. Create/update an OAuth2 provider to use authorisations keys") +app, created = Application.objects.get_or_create( + pk=1, + name='GeoServer', + client_type='confidential', + authorization_grant_type='authorization-code' +) +redirect_uris = [ + 'http://{}/geoserver'.format(os.getenv('HTTPS_HOST',"") if os.getenv('HTTPS_HOST',"") != "" else os.getenv('HTTP_HOST')), + 'http://{}/geoserver/index.html'.format(os.getenv('HTTPS_HOST',"") if os.getenv('HTTPS_HOST',"") != "" else os.getenv('HTTP_HOST')), +] +app.redirect_uris = "\n".join(redirect_uris) +app.save() +if created: + print('oauth2 provider successfully created') +else: + print('oauth2 provider successfully updated') + + +######################################################### +# 4. Loading fixtures +######################################################### + +print("-----------------------------------------------------") +print("4. Loading fixtures") +call_command('loaddata', 'initial_data') + + +######################################################### +# 5. Running updatemaplayerip +######################################################### + +print("-----------------------------------------------------") +print("5. Running updatemaplayerip") +# call_command('updatelayers') # TODO CRITICAL : this overrides the layer thumbnail of existing layers even if unchanged !!! +call_command('updatemaplayerip') + + +######################################################### +# 6. Collecting static files +######################################################### + +print("-----------------------------------------------------") +print("6. Collecting static files") +call_command('collectstatic', '--noinput', verbosity=0) + + +######################################################### +# 7. Securing GeoServer +######################################################### + +print("-----------------------------------------------------") +print("7. Securing GeoServer") + +# Getting the old password +try: + r1 = requests.get('http://geoserver:8080/geoserver/rest/security/masterpw.json', auth=(admin_username, admin_password)) +except requests.exceptions.ConnectionError as e: + print("Unable to connect to GeoServer. Make sure GeoServer is started and accessible.") + exit(1) +r1.raise_for_status() +old_password = json.loads(r1.text)["oldMasterPassword"] + +if old_password=='M(cqp{V1': + print("Randomizing master password") + new_password = uuid.uuid4().hex + data = json.dumps({"oldMasterPassword":old_password,"newMasterPassword":new_password}) + r2 = requests.put('http://geoserver:8080/geoserver/rest/security/masterpw.json', data=data, headers={'Content-Type': 'application/json'}, auth=(admin_username, admin_password)) + r2.raise_for_status() +else: + print("Master password was already changed. 
No changes made.") diff --git a/scripts/spcgeonode/docker-compose.override.yml b/scripts/spcgeonode/docker-compose.override.yml new file mode 100644 index 00000000000..407e08937f0 --- /dev/null +++ b/scripts/spcgeonode/docker-compose.override.yml @@ -0,0 +1,60 @@ +version: '3.4' + + +# Common Django template for Geonode, Celery and Celerycam services below +x-common-django: + &default-common-django + image: olivierdalang/spcgeonode:django-dev + environment: + - DEBUG=True + volumes: + - ../../:/spcgeonode/ + - ./_volume_static:/spcgeonode-static/ + - ./_volume_media:/spcgeonode-media/ + +services: + django: + << : *default-common-django + command: "uwsgi --chdir=/spcgeonode --module=geonode.wsgi --socket=:8000 --http=127.0.0.1:8001 --processes=5 --py-autoreload=2" + celery: + << : *default-common-django + command: 'celery worker --app=geonode.celery_app:app -l debug' + celerybeat: + << : *default-common-django + command: 'celery beat --app=geonode.celery_app:app --pidfile="/celerybeat.pid" -l debug' + celerycam: + << : *default-common-django + command: 'celery events --app=geonode.celery_app:app --pidfile="/celeryev.pid" --camera=django_celery_monitor.camera.Camera --frequency=2.0 -l debug' + + nginx: + image: olivierdalang/spcgeonode:nginx-dev + volumes: + - ./_volume_static:/spcgeonode-static/ + - ./_volume_media:/spcgeonode-media/ + - ./_volume_certificates:/spcgeonode-certificates/ + + geoserver: + image: olivierdalang/spcgeonode:geoserver-dev + volumes: + - ./_volume_geodatadir:/spcgeonode-geodatadir/ + + letsencrypt: + image: olivierdalang/spcgeonode:letsencrypt-dev + volumes: + - ./_volume_certificates:/spcgeonode-certificates/ + + pgdumper: + image: olivierdalang/spcgeonode:pgdumper-dev + volumes: + - ./_volume_pgdumps:/spcgeonode-pgdumps/ + + rclone: + image: olivierdalang/spcgeonode:rclone-dev + volumes: + - ./_volume_pgdumps:/spcgeonode-pgdumps/ + - ./_volume_media:/spcgeonode-media/ + - ./_volume_geodatadir:/spcgeonode-geodatadir/ + + postgres: + ports: + - "5432:5432" diff --git a/scripts/spcgeonode/docker-compose.yml b/scripts/spcgeonode/docker-compose.yml new file mode 100644 index 00000000000..0aebc180642 --- /dev/null +++ b/scripts/spcgeonode/docker-compose.yml @@ -0,0 +1,169 @@ +version: '3.4' + +# Common Django template for Geonode, Celery and Celerycam services below +x-common-django: + &default-common-django + image: olivierdalang/spcgeonode:django-latest + build: + context: ../../ + dockerfile: scripts/spcgeonode/django/Dockerfile + environment: + # editable in .env + - HTTPS_HOST=${HTTPS_HOST} + - HTTPS_PORT=${HTTPS_PORT} + - HTTP_HOST=${HTTP_HOST} + - HTTP_PORT=${HTTP_PORT} + - ADMIN_USERNAME=${ADMIN_USERNAME} + - ADMIN_PASSWORD=${ADMIN_PASSWORD} + - ADMIN_EMAIL=${ADMIN_EMAIL} + - GEOSERVER_ADMIN_USER=${ADMIN_USERNAME} + - GEOSERVER_ADMIN_PASSWORD=${ADMIN_PASSWORD} + - REGISTRATION_OPEN=${REGISTRATION_OPEN} + - TIME_ZONE=${TIME_ZONE} + - ALLOWED_HOSTS=['nginx','127.0.0.1','localhost','$HTTPS_HOST','$HTTP_HOST'] + - SECRET_KEY=${SECRET_KEY} + # hardcoded + - DEBUG=False + - DJANGO_SETTINGS_MODULE=geonode.settings + - DATABASE_URL=postgres://postgres:postgres@postgres:5432/postgres + - BROKER_URL=amqp://rabbitmq:5672 + - STATIC_ROOT=/spcgeonode-static/ + - MEDIA_ROOT=/spcgeonode-media/ + - STATIC_URL=/static/ + - MEDIA_URL=/uploaded/ + - GEOSERVER_LOCATION=http://nginx/geoserver/ + - ASYNC_SIGNALS=True + # TODO : we should probably remove this and set Celery to use JSON serialization instead of pickle + - C_FORCE_ROOT=True + # We get an exception after migrations on 
startup (it seems the monitoring app tries to resolve the geoserver domain name after it's migration, which can happen before oauth migrations on which geoserver startup depends...) + - MONITORING_ENABLED=False + volumes: + - static:/spcgeonode-static/ + - media:/spcgeonode-media/ + restart: on-failure + +services: + + # Our custom django application. It includes Geonode. + django: + << : *default-common-django + healthcheck: + test: "curl --fail --silent --write-out 'HTTP CODE : %{http_code}\n' --output /dev/null http://127.0.0.1:8001/" + interval: 60s + timeout: 10s + retries: 1 + start_period: 60s + entrypoint: ["/spcgeonode/scripts/spcgeonode/django/docker-entrypoint.sh"] + command: "uwsgi --chdir=/spcgeonode --module=geonode.wsgi --socket=:8000 --http=127.0.0.1:8001 --processes=5" + + # Celery worker that executes celery tasks created by Django. + celery: + << : *default-common-django + entrypoint: [] + command: 'celery worker --app=geonode.celery_app:app -l info -E' + + # Celery beat that triggers scheduled tasks + celerybeat: + << : *default-common-django + entrypoint: [] + command: 'celery beat --app=geonode.celery_app:app --pidfile="/celerybeat.pid" -l info' + + # Celery camera that monitors celery tasks and populate the djcelery django admin interface + celerycam: + << : *default-common-django + entrypoint: [] + command: 'celery events --app=geonode.celery_app:app --pidfile="/celeryev.pid" --camera=django_celery_monitor.camera.Camera --frequency=2.0 -l info' + + # Nginx is serving django static and media files and proxies to django and geonode + nginx: + image: olivierdalang/spcgeonode:nginx-latest + build: ./nginx/ + environment: + - HTTPS_HOST=${HTTPS_HOST} + - HTTP_HOST=${HTTP_HOST} + - LETSENCRYPT_MODE=${LETSENCRYPT_MODE} + - RESOLVER=127.0.0.11 + ports: + - "${HTTP_PORT}:80" + - "${HTTPS_PORT}:443" + volumes: + - static:/spcgeonode-static/ + - media:/spcgeonode-media/ + - certificates:/spcgeonode-certificates/ + restart: on-failure + + # Geoserver backend + geoserver: + image: olivierdalang/spcgeonode:geoserver-latest + build: ./geoserver/ + healthcheck: + test: "curl --fail --silent --write-out 'HTTP CODE : %{http_code}\n' --output /dev/null http://127.0.0.1:8080/geoserver/rest/workspaces/geonode.html" + interval: 60s + timeout: 10s + retries: 1 + start_period: 60s + environment: + - HTTPS_HOST=${HTTPS_HOST} + - HTTPS_PORT=${HTTPS_PORT} + - HTTP_HOST=${HTTP_HOST} + - HTTP_PORT=${HTTP_PORT} + - ADMIN_USERNAME=${ADMIN_USERNAME} + - ADMIN_PASSWORD=${ADMIN_PASSWORD} + volumes: + - geodatadir:/spcgeonode-geodatadir/ + restart: on-failure + + # Gets and installs letsencrypt certificates + letsencrypt: + image: olivierdalang/spcgeonode:letsencrypt-latest + build: ./letsencrypt/ + environment: + - HTTPS_HOST=${HTTPS_HOST} + - HTTP_HOST=${HTTP_HOST} + - ADMIN_EMAIL=${ADMIN_EMAIL} + - LETSENCRYPT_MODE=${LETSENCRYPT_MODE} + volumes: + - certificates:/spcgeonode-certificates/ + restart: on-failure + + pgdumper: + image: olivierdalang/spcgeonode:pgdumper-latest + build: ./pgdumper/ + volumes: + - pgdumps:/spcgeonode-pgdumps/ + restart: on-failure + + rclone: + image: olivierdalang/spcgeonode:rclone-latest + build: ./rclone/ + environment: + - S3_ACCESS_KEY=${S3_ACCESS_KEY} + - S3_SECRET_KEY=${S3_SECRET_KEY} + - S3_REGION=${S3_REGION} + - S3_BUCKET=${S3_BUCKET} + volumes: + - pgdumps:/spcgeonode-pgdumps/ + - media:/spcgeonode-media/ + - geodatadir:/spcgeonode-geodatadir/ + restart: on-failure + + # PostGIS database. 
+ postgres: + image: mdillon/postgis:9.6-alpine + volumes: + - database:/var/lib/postgresql/data/ + restart: on-failure + + # Vanilla RabbitMQ service. This is needed by celery + rabbitmq: + image: rabbitmq:3.7-alpine + restart: on-failure + +volumes: + static: + media: + database: + geodatadir: + certificates: + pgdumps: + diff --git a/scripts/spcgeonode/geoserver/Dockerfile b/scripts/spcgeonode/geoserver/Dockerfile new file mode 100644 index 00000000000..b94fb1888bd --- /dev/null +++ b/scripts/spcgeonode/geoserver/Dockerfile @@ -0,0 +1,43 @@ +FROM openjdk:8-jre-alpine + +# Install dependencies +RUN apk add --no-cache ca-certificates openssl curl postgresql-client fontconfig ttf-ubuntu-font-family +RUN update-ca-certificates + +WORKDIR / + +# Download Geoserver +# we first download vanilla geoserver, as it comes with preset jetty and launch scripts +# then we replace it with the geonode build +# TODO : this is a bit dirty..... can't we stat from vanilla Geoserver ? +# TODO : merge into on step +RUN echo "Download geoserver for geonode" && \ + wget https://downloads.sourceforge.net/project/geoserver/GeoServer/2.14.0/geoserver-2.14.0-bin.zip && \ + wget https://build.geo-solutions.it/geonode/geoserver/latest/geoserver-2.14.x.war --no-check-certificate && \ + unzip geoserver-2.14.0-bin.zip && \ + rm /geoserver-2.14.0-bin.zip && \ + rm /geoserver-2.14.0/webapps/geoserver/* -rf && \ + unzip -o geoserver-2.14.x.war -d /geoserver-2.14.0/webapps/geoserver/ && \ + rm /geoserver-2.14.x.war + +# Download initial data dir +RUN wget https://build.geo-solutions.it/geonode/geoserver/latest/data-2.14.x.zip --no-check-certificate +RUN unzip /data-2.14.x.zip +RUN ls /data + +WORKDIR /geoserver-2.14.0/ + +# Add the entrypoint +ADD docker-entrypoint.sh /docker-entrypoint.sh +RUN chmod +x /docker-entrypoint.sh +ENTRYPOINT ["/docker-entrypoint.sh"] + +# Export ports +EXPOSE 8080 + +# Set environnment variables +ENV GEOSERVER_HOME=/geoserver-2.14.0 +ENV GEOSERVER_DATA_DIR=/spcgeonode-geodatadir + +# Run geoserver +CMD ["bin/startup.sh"] diff --git a/scripts/spcgeonode/geoserver/docker-entrypoint.sh b/scripts/spcgeonode/geoserver/docker-entrypoint.sh new file mode 100644 index 00000000000..dc0c35c9e92 --- /dev/null +++ b/scripts/spcgeonode/geoserver/docker-entrypoint.sh @@ -0,0 +1,114 @@ +#!/bin/sh + +# Exit script in case of error +set -e + +echo $"\n\n\n" +echo "-----------------------------------------------------" +echo "STARTING GEOSERVER ENTRYPOINT -----------------------" +date + + +############################ +# 0. Defining BASEURL +############################ + +echo "-----------------------------------------------------" +echo "0. Defining BASEURL" + +if [ ! -z "$HTTPS_HOST" ]; then + BASEURL="https://$HTTPS_HOST" + if [ "$HTTPS_PORT" != "443" ]; then + BASEURL="$BASEURL:$HTTPS_PORT" + fi +else + BASEURL="http://$HTTP_HOST" + if [ "$HTTP_PORT" != "80" ]; then + BASEURL="$BASEURL:$HTTP_PORT" + fi +fi + +echo "BASEURL is $BASEURL" + +############################ +# 1. Initializing Geodatadir +############################ + +echo "-----------------------------------------------------" +echo "1. Initializing Geodatadir" + +if [ "$(ls -A /spcgeonode-geodatadir)" ]; then + echo 'Geodatadir not empty, skipping initialization...' +else + echo 'Geodatadir empty, we run initialization...' + cp -rf /data/* /spcgeonode-geodatadir/ +fi + + +############################ +# 2. 
ADMIN ACCOUNT +############################ + +# This section is not strictly required but allows to login geoserver with the admin account even if OAuth2 is unavailable (e.g. if Django can't start) + +echo "-----------------------------------------------------" +echo "2. (Re)setting admin account" + +ADMIN_ENCRYPTED_PASSWORD=$(/usr/lib/jvm/java-1.8-openjdk/jre/bin/java -classpath /geoserver-2.14.0/webapps/geoserver/WEB-INF/lib/jasypt-1.9.2.jar org.jasypt.intf.cli.JasyptStringDigestCLI digest.sh algorithm=SHA-256 saltSizeBytes=16 iterations=100000 input="$ADMIN_PASSWORD" verbose=0 | tr -d '\n') +sed -i -r "s|||" "/spcgeonode-geodatadir/security/usergroup/default/users.xml" +# TODO : more selective regexp for this one as there may be several users... +sed -i -r "s|||" "/spcgeonode-geodatadir/security/role/default/roles.xml" +ADMIN_ENCRYPTED_PASSWORD="" + + +############################ +# 3. OAUTH2 CONFIGURATION +############################ + +echo "-----------------------------------------------------" +echo "3. (Re)setting OAuth2 Configuration" + +# Edit /spcgeonode-geodatadir/security/filter/geonode-oauth2/config.xml + +# Getting oauth keys and secrets from the database +CLIENT_ID=$(psql -h postgres -U postgres -c "SELECT client_id FROM oauth2_provider_application WHERE name='GeoServer'" -t | tr -d '[:space:]') +CLIENT_SECRET=$(psql -h postgres -U postgres -c "SELECT client_secret FROM oauth2_provider_application WHERE name='GeoServer'" -t | tr -d '[:space:]') +if [ -z "$CLIENT_ID" ] || [ -z "$CLIENT_SECRET" ]; then + echo "Could not get OAuth2 ID and SECRET from database. Make sure Postgres container is started and Django has finished it's migrations." + exit 1 +fi + +sed -i -r "s|.*|$CLIENT_ID|" "/spcgeonode-geodatadir/security/filter/geonode-oauth2/config.xml" +sed -i -r "s|.*|$CLIENT_SECRET|" "/spcgeonode-geodatadir/security/filter/geonode-oauth2/config.xml" +# OAuth endpoints (client) +sed -i -r "s|.*|$BASEURL/o/authorize/|" "/spcgeonode-geodatadir/security/filter/geonode-oauth2/config.xml" +sed -i -r "s|.*|$BASEURL/geoserver/index.html|" "/spcgeonode-geodatadir/security/filter/geonode-oauth2/config.xml" +sed -i -r "s|.*|$BASEURL/account/logout/|" "/spcgeonode-geodatadir/security/filter/geonode-oauth2/config.xml" +# OAuth endpoints (server) +sed -i -r "s|.*|http://nginx/o/token/|" "/spcgeonode-geodatadir/security/filter/geonode-oauth2/config.xml" +sed -i -r "s|.*|http://nginx/api/o/v4/tokeninfo/|" "/spcgeonode-geodatadir/security/filter/geonode-oauth2/config.xml" + +# Edit /security/role/geonode REST role service/config.xml +sed -i -r "s|.*|http://nginx|" "/spcgeonode-geodatadir/security/role/geonode REST role service/config.xml" + +CLIENT_ID="" +CLIENT_SECRET="" + + +############################ +# 3. RE(SETTING) BASE URL +############################ + +echo "-----------------------------------------------------" +echo "4. (Re)setting Baseurl" + +sed -i -r "s|.*|$BASEURL|" "/spcgeonode-geodatadir/global.xml" + + + +echo "-----------------------------------------------------" +echo "FINISHED GEOSERVER ENTRYPOINT -----------------------" +echo "-----------------------------------------------------" + +# Run the CMD +exec "$@" diff --git a/scripts/spcgeonode/letsencrypt/Dockerfile b/scripts/spcgeonode/letsencrypt/Dockerfile new file mode 100644 index 00000000000..28c1950bb05 --- /dev/null +++ b/scripts/spcgeonode/letsencrypt/Dockerfile @@ -0,0 +1,19 @@ +FROM alpine:3.6 + +# 1-2. 
Install system dependencies +RUN apk add --no-cache certbot + +# Installing scripts +ADD docker-entrypoint.sh /docker-entrypoint.sh +RUN chmod +x /docker-entrypoint.sh + +# Installing cronjobs +ADD crontab /crontab +RUN /usr/bin/crontab /crontab && \ + rm /crontab + +# Setup the entrypoint +ENTRYPOINT ["./docker-entrypoint.sh"] + +# We run cron in foreground to update the certificates +CMD /usr/sbin/crond -f diff --git a/scripts/spcgeonode/letsencrypt/README.md b/scripts/spcgeonode/letsencrypt/README.md new file mode 100644 index 00000000000..363184d1dbf --- /dev/null +++ b/scripts/spcgeonode/letsencrypt/README.md @@ -0,0 +1,15 @@ +# Letsencrypt service for SPCGeonode + +This service generates SSL certificates to be used by Nginx. + +## Let's Encrypt + +Upon startup, it generates one SSL certificate from Let's Encrypt using Certbot. It then starts cron (in foreground) to renew the certificates using Certbot renew. + +If for some reason getting the certificate fails, a placeholder certificate is generated. This certificate is invalid, but still allows to encrypt the data and to start the webserver. + +To avoid hitting Let's Encrypt very low rate limits when developping or doing tests, LETSENCRYPT_MODE env var can be set to "disabled" (which will completely bypass Let'sEncrypt, simulating a failure) or to "staging" (using Let'sEncrypt test certificates with higher rates). + +## Autoissued + +An auto-issued certificate is also generate to be used on the LAN if needed. It is also renewed every now and then using the same cron process than above. diff --git a/scripts/spcgeonode/letsencrypt/crontab b/scripts/spcgeonode/letsencrypt/crontab new file mode 100644 index 00000000000..8380b16e446 --- /dev/null +++ b/scripts/spcgeonode/letsencrypt/crontab @@ -0,0 +1,8 @@ +# ┌───────────── minute (0 - 59) +# │ ┌───────────── hour (0 - 23) +# │ │ ┌───────────── day of month (1 - 31) +# │ │ │ ┌───────────── month (1 - 12) +# │ │ │ │ ┌───────────── day of week (0 - 6) (Sunday to Saturday; 7 is also Sunday on some systems) +# │ │ │ │ │ + + 0 0,12 * * * date && echo "daily " && /docker-entrypoint.sh diff --git a/scripts/spcgeonode/letsencrypt/docker-entrypoint.sh b/scripts/spcgeonode/letsencrypt/docker-entrypoint.sh new file mode 100644 index 00000000000..3199d4efc96 --- /dev/null +++ b/scripts/spcgeonode/letsencrypt/docker-entrypoint.sh @@ -0,0 +1,49 @@ +#!/bin/sh + +# Exit script in case of error +set -e + +echo $"\n\n\n" +echo "-----------------------------------------------------" +echo "STARTING LETSENCRYPT ENTRYPOINT ---------------------" +date + +# We make the config dir +mkdir -p "/spcgeonode-certificates/$LETSENCRYPT_MODE" + +# Do not exit script in case of error +set +e + +# We run the command +if [ "$LETSENCRYPT_MODE" == "staging" ]; then + printf "\nTrying to get STAGING certificate\n" + certbot --config-dir "/spcgeonode-certificates/$LETSENCRYPT_MODE" certonly --webroot -w "/spcgeonode-certificates" -d "$HTTPS_HOST" -m "$ADMIN_EMAIL" --agree-tos --non-interactive --staging +elif [ "$LETSENCRYPT_MODE" == "production" ]; then + printf "\nTrying to get PRODUCTION certificate\n" + certbot --config-dir "/spcgeonode-certificates/$LETSENCRYPT_MODE" certonly --webroot -w "/spcgeonode-certificates" -d "$HTTPS_HOST" -m "$ADMIN_EMAIL" --agree-tos --non-interactive +else + printf "\nNot trying to get certificate (simulating failure, because LETSENCRYPT_MODE variable was neither staging nor production\n" + /bin/false +fi + +# If the certbot comand failed, we will create a placeholder certificate +if [ ! $? 
-eq 0 ]; then + # Exit script in case of error + set -e + + printf "\nFailed to get the certificates !\n" + + printf "\nWaiting 30s to avoid hitting Letsencrypt rate limits before it's even possible to react\n" + sleep 30 + + exit 1 +fi + +printf "\nCertificate have been created/renewed successfully\n" + +echo "-----------------------------------------------------" +echo "FINISHED LETSENCRYPT ENTRYPOINT ---------------------" +echo "-----------------------------------------------------" + +# Run the CMD +exec "$@" diff --git a/scripts/spcgeonode/nginx/Dockerfile b/scripts/spcgeonode/nginx/Dockerfile new file mode 100644 index 00000000000..ddfe4527cfe --- /dev/null +++ b/scripts/spcgeonode/nginx/Dockerfile @@ -0,0 +1,14 @@ +FROM nginx:1.13.7-alpine + +RUN apk add --no-cache openssl inotify-tools + +WORKDIR /etc/nginx/ + +ADD nginx.conf.envsubst nginx.https.available.conf.envsubst spcgeonode.conf ./ + +ADD docker-autoreload.sh docker-entrypoint.sh / +ENTRYPOINT ["/docker-entrypoint.sh"] +RUN chmod +x /docker-autoreload.sh +RUN chmod +x /docker-entrypoint.sh + +CMD ["nginx", "-g", "daemon off;"] \ No newline at end of file diff --git a/scripts/spcgeonode/nginx/docker-autoreload.sh b/scripts/spcgeonode/nginx/docker-autoreload.sh new file mode 100644 index 00000000000..8eb9aeabe31 --- /dev/null +++ b/scripts/spcgeonode/nginx/docker-autoreload.sh @@ -0,0 +1,37 @@ +#!/bin/sh + +# This will watch the /spcgeonode-certificates folder and run nginx -s reload whenever there are some changes. +# We use this to reload nginx config when certificates changed. + +# inspired/copied from https://github.com/kubernetes/kubernetes/blob/master/examples/https-nginx/auto-reload-nginx.sh + +while true +do + inotifywait -e create -e modify -e delete -e move -r --exclude "\\.certbot\\.lock|\\.well-known" "/spcgeonode-certificates/$LETSENCRYPT_MODE" + echo "Changes noticed in /spcgeonode-certificates" + + echo "Waiting 5s for additionnal changes" + sleep 5 + + echo "Creating symbolic link for WAN host" + # for some reason, the ln -f flag doesn't work below... + rm -f /certificate_symlink + if [ -f "/spcgeonode-certificates/$LETSENCRYPT_MODE/live/$HTTPS_HOST/fullchain.pem" ] && [ -f "/spcgeonode-certificates/$LETSENCRYPT_MODE/live/$HTTPS_HOST/privkey.pem" ]; then + echo "Certbot certificate exists, we symlink to the live cert" + ln -sf "/spcgeonode-certificates/$LETSENCRYPT_MODE/live/$HTTPS_HOST" /certificate_symlink + else + echo "Certbot certificate does not exist, we symlink to autoissued" + ln -sf "/spcgeonode-certificates/autoissued" /certificate_symlink + fi + + # Test nginx configuration + nginx -t + # If it passes, we reload + if [ $? -eq 0 ] + then + echo "Configuration valid, we reload..." + nginx -s reload + else + echo "Configuration not valid, we do not reload." + fi +done diff --git a/scripts/spcgeonode/nginx/docker-entrypoint.sh b/scripts/spcgeonode/nginx/docker-entrypoint.sh new file mode 100644 index 00000000000..25291362ff9 --- /dev/null +++ b/scripts/spcgeonode/nginx/docker-entrypoint.sh @@ -0,0 +1,54 @@ +#!/bin/sh + +# Exit script in case of error +set -e + +echo $"\n\n\n" +echo "-----------------------------------------------------" +echo "STARTING NGINX ENTRYPOINT ---------------------------" +date + +# We make the config dir +mkdir -p "/spcgeonode-certificates/$LETSENCRYPT_MODE" + +echo "Creating autoissued certificates for HTTP host" +if [ ! 
-f "/spcgeonode-certificates/autoissued/privkey.pem" ] || [[ $(find /spcgeonode-certificates/autoissued/privkey.pem -mtime +365 -print) ]]; then + echo "Autoissued certificate does not exist or is too old, we generate one" + mkdir -p "/spcgeonode-certificates/autoissued/" + openssl req -x509 -nodes -days 1825 -newkey rsa:2048 -keyout "/spcgeonode-certificates/autoissued/privkey.pem" -out "/spcgeonode-certificates/autoissued/fullchain.pem" -subj "/CN=${HTTP_HOST:-null}" +else + echo "Autoissued certificate already exists" +fi + +echo "Creating symbolic link for HTTPS certificate" +# for some reason, the ln -f flag doesn't work below... +# TODO : not DRY (reuse same scripts as docker-autoreload.sh) +rm -f /certificate_symlink +if [ -f "/spcgeonode-certificates/$LETSENCRYPT_MODE/live/$HTTPS_HOST/fullchain.pem" ] && [ -f "/spcgeonode-certificates/$LETSENCRYPT_MODE/live/$HTTPS_HOST/privkey.pem" ]; then + echo "Certbot certificate exists, we symlink to the live cert" + ln -sf "/spcgeonode-certificates/$LETSENCRYPT_MODE/live/$HTTPS_HOST" /certificate_symlink +else + echo "Certbot certificate does not exist, we symlink to autoissued" + ln -sf "/spcgeonode-certificates/autoissued" /certificate_symlink +fi + +echo "Replacing environement variables" +envsubst '\$HTTP_HOST \$HTTPS_HOST \$RESOLVER' < /etc/nginx/nginx.conf.envsubst > /etc/nginx/nginx.conf +envsubst '\$HTTP_HOST \$HTTPS_HOST \$RESOLVER' < /etc/nginx/nginx.https.available.conf.envsubst > /etc/nginx/nginx.https.available.conf + +echo "Enabling or not https configuration" +if [ -z "${HTTPS_HOST}" ]; then + echo "" > /etc/nginx/nginx.https.enabled.conf +else + ln -sf /etc/nginx/nginx.https.available.conf /etc/nginx/nginx.https.enabled.conf +fi + +echo "Loading nginx autoreloader" +sh /docker-autoreload.sh & + +echo "-----------------------------------------------------" +echo "FINISHED NGINX ENTRYPOINT ---------------------------" +echo "-----------------------------------------------------" + +# Run the CMD +exec "$@" diff --git a/scripts/spcgeonode/nginx/nginx.conf.envsubst b/scripts/spcgeonode/nginx/nginx.conf.envsubst new file mode 100644 index 00000000000..06af4254093 --- /dev/null +++ b/scripts/spcgeonode/nginx/nginx.conf.envsubst @@ -0,0 +1,36 @@ +# NOTE : $VARIABLES are env variables replaced by entrypoint.sh using envsubst +# not to be mistaken for nginx variables (also starting with $, but usually lowercase) + +events { + +} + +http{ + + # Allow Nginx to resolve Docker host names (see https://sandro-keil.de/blog/2017/07/24/let-nginx-start-if-upstream-host-is-unavailable-or-down/) + resolver $RESOLVER; # it seems rancher uses 169.254.169.250 instead of 127.0.0.11 which works well in docker-compose (see /etc/resolv.conf) + + # https - listens on specific name - this uses letsencrypt cert + # this includes a symlink that links either to nginx.https.available.conf if https in enabled + # or to an empty file if https is disabled. + include nginx.https.enabled.conf; + + # http - listens to specific HTTP_HOST only - this is not encrypted (not ideal but admissible on LAN for instance) + # even if not used (HTTP_HOST empty), we must keep it as it's used for internal API calls between django and geoserver + # TODO : do not use unencrypted connection even on LAN, but is it possible to have browser not complaining about unknown authority ? 
+ server { + listen 80; + server_name $HTTP_HOST 127.0.0.1 nginx; + + include spcgeonode.conf; + } + + # Default server closes the connection (we can connect only using HTTP_HOST and HTTPS HOST) + server { + listen 80 default_server; + listen 443; + server_name _; + return 444; + } + +} \ No newline at end of file diff --git a/scripts/spcgeonode/nginx/nginx.https.available.conf.envsubst b/scripts/spcgeonode/nginx/nginx.https.available.conf.envsubst new file mode 100644 index 00000000000..2c9a52077a8 --- /dev/null +++ b/scripts/spcgeonode/nginx/nginx.https.available.conf.envsubst @@ -0,0 +1,32 @@ +# NOTE : $VARIABLES are env variables replaced by entrypoint.sh using envsubst +# not to be mistaken for nginx variables (also starting with $, but usually lowercase) + +# This file is to be included in the main nginx.conf configuration if HTTPS_HOST is set + +# this is the actual HTTPS host +server { + listen 443 ssl; + server_name $HTTPS_HOST; + + ssl_certificate /certificate_symlink/fullchain.pem; + ssl_certificate_key /certificate_symlink/privkey.pem; + + include spcgeonode.conf; +} + +# if we try to connect from http, we redirect to https +server { + listen 80; + server_name $HTTPS_HOST $HTTP_HOST; # TODO : once geoserver supports relative urls, we should allow access though both HTTP and HTTPS at the same time and hence remove HTTP_HOST from this line + + # Except for let's encrypt challenge + location /.well-known { + alias /spcgeonode-certificates/.well-known; + include /etc/nginx/mime.types; + } + + # Redirect to https + location / { + return 302 https://$HTTPS_HOST$request_uri; # TODO : we should use 301 (permanent redirect, but not practical for debug) + } +} diff --git a/scripts/spcgeonode/nginx/spcgeonode.conf b/scripts/spcgeonode/nginx/spcgeonode.conf new file mode 100644 index 00000000000..76835b10f10 --- /dev/null +++ b/scripts/spcgeonode/nginx/spcgeonode.conf @@ -0,0 +1,59 @@ +# This is the main gepgeonode conf + +charset utf-8; + +# max upload size +client_max_body_size 100G; + +# compression +gzip on; +gzip_proxied any; +gzip_types + text/css + text/javascript + text/xml + text/plain + application/javascript + application/x-javascript + application/json; + +# Geoserver +location /geoserver { + + # Using a variable is a trick to let Nginx start even if upstream host is not up yet + # (see https://sandro-keil.de/blog/2017/07/24/let-nginx-start-if-upstream-host-is-unavailable-or-down/) + set $upstream geoserver:8080; + + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_pass http://$upstream; +} + +# Django media +location /uploaded { + alias /spcgeonode-media; # your Django project's media files - amend as required + include /etc/nginx/mime.types; + expires 365d; +} + +location /static { + alias /spcgeonode-static; # your Django project's static files - amend as required + include /etc/nginx/mime.types; + expires 365d; +} + +# Finally, send all non-media requests to the Django server. 
+location / {
+
+    # Using a variable is a trick to let Nginx start even if the upstream host is not up yet
+    # (see https://sandro-keil.de/blog/2017/07/24/let-nginx-start-if-upstream-host-is-unavailable-or-down/)
+    set $upstream django:8000;
+
+    uwsgi_pass $upstream;
+
+    # uwsgi_params
+    include /etc/nginx/uwsgi_params;
+}
\ No newline at end of file
diff --git a/scripts/spcgeonode/pgdumper/Dockerfile b/scripts/spcgeonode/pgdumper/Dockerfile
new file mode 100644
index 00000000000..1aeb35d49a1
--- /dev/null
+++ b/scripts/spcgeonode/pgdumper/Dockerfile
@@ -0,0 +1,16 @@
+FROM python:2.7.13-alpine3.6
+
+# Install system dependencies (we only need the pg_dump binary from postgresql, other dependencies are in postgresql-client)
+RUN apk add --no-cache postgresql-client && \
+    apk add --no-cache --virtual BUILD_DEPS postgresql && \
+    cp /usr/bin/pg_dump /bin/pg_dump && \
+    apk del BUILD_DEPS
+
+
+# Install the crontab
+ADD crontab crontab
+RUN /usr/bin/crontab crontab
+RUN rm crontab
+
+# We run cron in the foreground to trigger the scheduled dumps
+CMD ["/usr/sbin/crond", "-f"]
diff --git a/scripts/spcgeonode/pgdumper/crontab b/scripts/spcgeonode/pgdumper/crontab
new file mode 100644
index 00000000000..1b59eaa6093
--- /dev/null
+++ b/scripts/spcgeonode/pgdumper/crontab
@@ -0,0 +1,8 @@
+# ┌───────────── minute (0 - 59)
+# │ ┌───────────── hour (0 - 23)
+# │ │ ┌───────────── day of month (1 - 31)
+# │ │ │ ┌───────────── month (1 - 12)
+# │ │ │ │ ┌───────────── day of week (0 - 6) (Sunday to Saturday; 7 is also Sunday on some systems)
+# │ │ │ │ │
+
+  0 0 * * * date && pg_dump -C -h postgres -U postgres postgres > /spcgeonode-pgdumps/latest.pgdump && echo "Dump successful"
diff --git a/scripts/spcgeonode/rclone/Dockerfile b/scripts/spcgeonode/rclone/Dockerfile
new file mode 100644
index 00000000000..118f0e6caaa
--- /dev/null
+++ b/scripts/spcgeonode/rclone/Dockerfile
@@ -0,0 +1,29 @@
+FROM alpine:3.6
+
+# Install deps
+RUN apk add --no-cache libressl ca-certificates gettext
+
+# Install rclone
+RUN wget https://downloads.rclone.org/v1.40/rclone-v1.40-linux-amd64.zip
+RUN unzip /rclone-v1.40-linux-amd64.zip
+RUN mv /rclone-v1.40-linux-amd64/rclone /usr/bin
+RUN rm /rclone-v1.40-linux-amd64.zip
+RUN rm -rf /rclone-v1.40-linux-amd64
+
+
+# Add scripts
+ADD sync.sh /root/sync.sh
+RUN chmod +x /root/sync.sh
+
+ADD docker-entrypoint.sh docker-entrypoint.sh
+RUN chmod +x docker-entrypoint.sh
+
+ADD crontab crontab
+RUN /usr/bin/crontab crontab
+RUN rm crontab
+
+ADD rclone.s3.conf.envsubst rclone.s3.conf.envsubst
+
+# We run cron in the foreground to trigger the scheduled syncs
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["/usr/sbin/crond", "-f"]
diff --git a/scripts/spcgeonode/rclone/crontab b/scripts/spcgeonode/rclone/crontab
new file mode 100644
index 00000000000..36c0972daa4
--- /dev/null
+++ b/scripts/spcgeonode/rclone/crontab
@@ -0,0 +1,8 @@
+# ┌───────────── minute (0 - 59)
+# │ ┌───────────── hour (0 - 23)
+# │ │ ┌───────────── day of month (1 - 31)
+# │ │ │ ┌───────────── month (1 - 12)
+# │ │ │ │ ┌───────────── day of week (0 - 6) (Sunday to Saturday; 7 is also Sunday on some systems)
+# │ │ │ │ │
+
+  30 */4 * * * date && echo "Running sync from cron job" && /root/sync.sh
diff --git a/scripts/spcgeonode/rclone/docker-entrypoint.sh b/scripts/spcgeonode/rclone/docker-entrypoint.sh
new file mode 100644
index 00000000000..044b1c56ab1
--- /dev/null
+++ b/scripts/spcgeonode/rclone/docker-entrypoint.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+
+# Exit script in case of error
+set -e
+
+printf "\n\n\n"
+echo "-----------------------------------------------------"
"-----------------------------------------------------" +echo "STARTING RCLONE ENTRYPOINT --------------------------" +date + +############################ +# 1. Assert there is data to be backed up +############################ + +echo "-----------------------------------------------------" +echo "1. Assert there is data to be backed up" + +if [ "$(ls -A /spcgeonode-geodatadir)" ] || [ "$(ls -A /spcgeonode-media)" ] || [ "$(ls -A /spcgeonode-pgdumps)" ]; then + echo 'Found data do backup' +else + # If all backups directories are empty, we quit, because + # we want to make sure backup works by running at least + # once instead of letting the user believe everything works fine + echo 'Nothing to backup, we quit...' + exit 1 +fi + +############################ +# 2. Replacing environment variables +############################ + +echo "-----------------------------------------------------" +echo "2. Replacing environment variables" +envsubst ' \$S3_ACCESS_KEY \$S3_SECRET_KEY \$S3_REGION \$S3_BUCKET' < /rclone.s3.conf.envsubst > /rclone.s3.conf +# TODO : remove this +cat /rclone.s3.conf + +############################ +# 3. Running once to ensure config works +############################ + +echo "-----------------------------------------------------" +echo "3. Running once to ensure config works" +/root/sync.sh + +echo "-----------------------------------------------------" +echo "FINISHED RCLONE ENTRYPOINT --------------------------" +echo "-----------------------------------------------------" + +# Run the CMD +exec "$@" diff --git a/scripts/spcgeonode/rclone/rclone.s3.conf.envsubst b/scripts/spcgeonode/rclone/rclone.s3.conf.envsubst new file mode 100644 index 00000000000..30737e005da --- /dev/null +++ b/scripts/spcgeonode/rclone/rclone.s3.conf.envsubst @@ -0,0 +1,15 @@ +############################################ +# Amazon S3 configuration +############################################ + +[spcgeonode_base] +type = s3 +acl = private +access_key_id = $S3_ACCESS_KEY +secret_access_key = $S3_SECRET_KEY +region = $S3_REGION +env_auth = false + +[spcgeonode] +type = alias +remote = spcgeonode_base:$S3_BUCKET diff --git a/scripts/spcgeonode/rclone/sync.sh b/scripts/spcgeonode/rclone/sync.sh new file mode 100644 index 00000000000..e6d4c52ab8c --- /dev/null +++ b/scripts/spcgeonode/rclone/sync.sh @@ -0,0 +1,14 @@ +#!/bin/sh + +# Exit script in case of error +set -e + +if [ ! -z "${S3_ACCESS_KEY}" ]; then + rclone sync -v --config /rclone.s3.conf /spcgeonode-geodatadir/ spcgeonode:geodatadir/ + rclone sync -v --config /rclone.s3.conf /spcgeonode-media/ spcgeonode:media/ + rclone sync -v --config /rclone.s3.conf /spcgeonode-pgdumps/ spcgeonode:pgdumps/ + + echo "S3 sync successful !!" +fi + +echo "Finished syncing"