Commit bc3386a8 authored by Lubomir Dolezal

Merge branch 'master' into registrar-modularization

parents 08005098 323cc045
3 merge requests: !36 Staging to master to prepare 1.0.0 release, !32 Registrar modularization, !27 Registrar modularization
@@ -16,27 +16,27 @@ build-master:
- VERSION_1=`grep 'version="*"' core/Dockerfile | cut -d '"' -f2`
- IMAGE_1="$CI_REGISTRY_IMAGE/pvs_core"
- docker pull "$IMAGE_1":latest || true
- docker build --cache-from "$IMAGE_1":latest -t "$IMAGE_1":latest -t "$IMAGE_1":$VERSION_1 core/
- docker build --cache-from "$IMAGE_1":latest -t "$IMAGE_1":dev -t "$IMAGE_1":$VERSION_1 core/
- VERSION_2=`grep 'version="*"' preprocessor/Dockerfile | cut -d '"' -f2`
- IMAGE_2="$CI_REGISTRY_IMAGE/pvs_preprocessor"
- docker pull "$IMAGE_2":latest || true
- docker build --cache-from "$IMAGE_2":latest -t "$IMAGE_2":latest -t "$IMAGE_2":$VERSION_2 preprocessor/
- docker build --cache-from "$IMAGE_2":latest -t "$IMAGE_2":dev -t "$IMAGE_2":$VERSION_2 preprocessor/
- VERSION_3=`grep 'version="*"' client/Dockerfile | cut -d '"' -f2`
- IMAGE_3="$CI_REGISTRY_IMAGE/pvs_client"
- docker pull "$IMAGE_3":latest || true
- docker build --cache-from "$IMAGE_3":latest -t "$IMAGE_3":latest -t "$IMAGE_3":$VERSION_3 client/
- docker build --cache-from "$IMAGE_3":latest -t "$IMAGE_3":dev -t "$IMAGE_3":$VERSION_3 client/
- VERSION_4=`grep 'version="*"' cache/Dockerfile | cut -d '"' -f2`
- IMAGE_4="$CI_REGISTRY_IMAGE/pvs_cache"
- docker pull "$IMAGE_4":latest || true
- docker build --cache-from "$IMAGE_4":latest -t "$IMAGE_4":latest -t "$IMAGE_4":$VERSION_4 cache/
- docker build --cache-from "$IMAGE_4":latest -t "$IMAGE_4":dev -t "$IMAGE_4":$VERSION_4 cache/
- VERSION_5=`grep 'version="*"' fluentd/Dockerfile | cut -d '"' -f2`
- IMAGE_5="$CI_REGISTRY_IMAGE/fluentd"
- docker pull "$IMAGE_5":latest || true
- docker build --cache-from "$IMAGE_5":latest -t "$IMAGE_5":latest -t "$IMAGE_5":$VERSION_5 fluentd/
- docker build --cache-from "$IMAGE_5":latest -t "$IMAGE_5":dev -t "$IMAGE_5":$VERSION_5 fluentd/
- VERSION_6=`grep 'version="*"' ingestor/Dockerfile | cut -d '"' -f2`
- IMAGE_6="$CI_REGISTRY_IMAGE/pvs_ingestor"
- docker pull "$IMAGE_6":latest || true
- docker build --cache-from "$IMAGE_6":latest -t "$IMAGE_6":latest -t "$IMAGE_6":$VERSION_6 ingestor/
- docker build --cache-from "$IMAGE_6":latest -t "$IMAGE_6":dev -t "$IMAGE_6":$VERSION_6 ingestor/
- cd ./testing && ./gitlab_test.sh
- if [ $? -ne 0 ]; then exit 1; fi # actually fail build
- docker push "$IMAGE_1":$VERSION_1
@@ -53,7 +53,7 @@ build-master:
- docker push "$IMAGE_6":latest
only:
- master
- tags
build:
image: docker:latest
stage: build
@@ -64,28 +64,23 @@ build:
script:
- IMAGE="$CI_REGISTRY_IMAGE/pvs_core"
- docker pull "$IMAGE":latest || true
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME core/
- docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev core/
- IMAGE="$CI_REGISTRY_IMAGE/pvs_preprocessor"
- docker pull "$IMAGE":latest || true
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME preprocessor/
- docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev preprocessor/
- IMAGE="$CI_REGISTRY_IMAGE/pvs_client"
- docker pull "$IMAGE":latest || true
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME client/
- docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev client/
- IMAGE="$CI_REGISTRY_IMAGE/pvs_cache"
- docker pull "$IMAGE":latest || true
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME cache/
- docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev cache/
- IMAGE="$CI_REGISTRY_IMAGE/fluentd"
- docker pull "$IMAGE":latest || true
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME fluentd/
- docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev fluentd/
- IMAGE="$CI_REGISTRY_IMAGE/pvs_ingestor"
- docker pull "$IMAGE":latest || true
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME ingestor/
- cd ./testing && ./gitlab_test.sh && cd -
- docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev ingestor/
- cd ./testing && ./gitlab_test.sh
- if [ $? -ne 0 ]; then exit 1; fi # actually fail build
except:
- master
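For reference, the `VERSION_*` lines above read a version label from each component's Dockerfile. A minimal sketch of that extraction, assuming `core/Dockerfile` carries a line like `LABEL version="1.0.0"`:

```bash
# assuming core/Dockerfile contains: LABEL version="1.0.0"
VERSION_1=$(grep 'version="*"' core/Dockerfile | cut -d '"' -f2)
echo "$VERSION_1"   # -> 1.0.0
```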
@@ -187,25 +187,17 @@ docker swarm init # initialize swarm
```
Build images:
Note: we use the **dev** tag for local development, so the images need to be built locally:
```
docker build core/ --cache-from registry.gitlab.eox.at/esa/prism/vs/pvs_core -t registry.gitlab.eox.at/esa/prism/vs/pvs_core
docker build cache/ --cache-from registry.gitlab.eox.at/esa/prism/vs/pvs_cache -t registry.gitlab.eox.at/esa/prism/vs/pvs_cache
docker build preprocessor/ --cache-from registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor -t registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor
docker build client/ --cache-from registry.gitlab.eox.at/esa/prism/vs/pvs_client -t registry.gitlab.eox.at/esa/prism/vs/pvs_client
docker build fluentd/ --cache-from registry.gitlab.eox.at/esa/prism/vs/fluentd -t registry.gitlab.eox.at/esa/prism/vs/fluentd
docker build ingestor/ --cache-from registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor -t registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor
```
Or pull them from the registry:
```
docker login -u {DOCKER_USER} -p {DOCKER_PASSWORD} registry.gitlab.eox.at
docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_core
docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_cache
docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor
docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_client
docker pull registry.gitlab.eox.at/esa/prism/vs/fluentd
docker pull registry.gitlab.eox.at/esa/prism/vs/ingestor
docker build core/ -t registry.gitlab.eox.at/esa/prism/vs/pvs_core:dev
docker build cache/ -t registry.gitlab.eox.at/esa/prism/vs/pvs_cache:dev
docker build preprocessor/ -t registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor:dev
docker build client/ -t registry.gitlab.eox.at/esa/prism/vs/pvs_client:dev
docker build fluentd/ -t registry.gitlab.eox.at/esa/prism/vs/fluentd:dev
docker build ingestor/ -t registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor:dev
```
For production deployment, since the registry is open to the public, this part is handled by the later step `Deploy the stack in production`, which pulls the necessary images automatically.
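If you have pulled prebuilt images from the registry instead of building them, note that the dev compose files expect the **dev** tag; a hedged sketch of retagging a pulled image (using `pvs_core` as the example):

```bash
# illustrative: give a pulled image the dev tag expected by the dev stack
docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_core:latest
docker tag registry.gitlab.eox.at/esa/prism/vs/pvs_core:latest registry.gitlab.eox.at/esa/prism/vs/pvs_core:dev
```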
Create the external network for the stack to run:
```
docker network create -d overlay vhr18-extnet
@@ -216,12 +208,16 @@ Add following .env files with credentials to the cloned copy of the repository /
create docker secrets:
Sensitive environment variables are not included in the .env files, and must be generated as docker secrets. All stacks currently share these secret names, therefore it must stay the same for all stacks. To create docker secrets run:
Sensitive environment variables are not included in the .env files and must be created as docker secrets. All stacks currently share these secret names, so they must stay the same across stacks. The same goes for the sftp configuration values. To create the docker secrets and configs, run:
```bash
# secret creation
# replace the "<variable>" with the value of the secret
printf "<OS_PASSWORD_DOWNLOAD>" | docker secret create OS_PASSWORD_DOWNLOAD -
printf "<DJANGO_PASSWORD>" | docker secret create DJANGO_PASSWORD -
printf "<OS_PASSWORD>" | docker secret create OS_PASSWORD -
# configs creation
printf "<user>:<password>:<UID>:<GID>" | docker config create sftp-users -
# for production base stack deployment, an additional basic authentication credentials list needs to be created
# the format of such a list used by traefik is username:hashedpassword (MD5, SHA1, BCrypt)
sudo apt-get install apache2-utils
@@ -235,10 +231,17 @@ Deploy the stack in dev environment:
docker stack deploy -c docker-compose.vhr18.yml -c docker-compose.vhr18.dev.yml -c docker-compose.logging.yml -c docker-compose.logging.dev.yml vhr18-pvs # start VHR_IMAGE_2018 stack in dev mode, for example to use local sources
docker stack deploy -c docker-compose.emg.yml -c docker-compose.emg.dev.yml -c docker-compose.logging.yml -c docker-compose.logging.dev.yml emg-pvs # start Emergency stack in dev mode, for example to use local sources
```
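The deploy commands return immediately while the services start up in the background; standard swarm commands can be used to watch progress, for example:

```bash
docker service ls           # replica counts for all services across stacks
docker stack ps vhr18-pvs   # individual tasks of one stack, including failed ones
```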
Deploy base stack in production environment:
Deploy base & logging stack in production environment:
```
docker stack deploy -c docker-compose.base.ops.yml base-pvs
docker stack deploy -c docker-compose.logging.yml -c docker-compose.logging.ops.yml logging
```
Deploy the stack in production environment:
Please note that in order to reuse existing database volumes, `<stack-name>` needs to stay the same. Here we use `vhr18-pvs`, but the operational service uses `vhr18-pdas`.
```
docker stack deploy -c docker-compose.vhr18.yml -c docker-compose.vhr18.ops.yml vhr18-pvs
```
First steps:
```
# To register first data, use the following command inside the registrar container:
@@ -280,16 +283,16 @@ On production machine, `fluentd` is set as a logging driver for docker daemon by
```
### setup sftp
The `SFTP` image allow remote access into 2 logging folders, you can define (edit/add) users, passwords and (UID/GID) in the respective configuration file ( e.g *config/vhr_sftp_users.conf* ).
The `SFTP` image allows remote access into the 2 logging folders; you can define (edit/add) users, passwords and UID/GID via the `docker config create` command mentioned above.
The default username is `eox`, once the stack is deployed you can sftp into the logging folders through port 2222 on -if you are running the dev stack- localhost :
In the example below the username is `eox`. Once the stack is deployed, you can sftp into the logging folders through port 2222 on the host (localhost if you are running the dev stack):
```bash
sftp -P 2222 eox@127.0.0.1
```
You will be logged into the `/home/eox/data` directory, which contains the 2 logging directories: `to/panda` and `from/fepd`.
**NOTE:** The mounted directory that you are directed into is *`/home/user`*, where `user` is the username, hence when changing the username in the `.conf` file, the `sftp` mounted volumes path in `docker-compose.<collection>.yml` must change respectively.
**NOTE:** The mounted directory that you are directed into is *`/home/user`*, where `user` is the username; hence, when setting or editing the username in the configs, the `sftp` mounted volume paths in `docker-compose.<collection>.yml` must be changed accordingly.
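`sftp` also reads commands from standard input, so the logging folders can be inspected in a scripted way; a minimal sketch, assuming the dev stack and the `eox` user:

```bash
# list the generated reports in the to/panda logging directory
sftp -P 2222 eox@127.0.0.1 <<'EOF'
cd to/panda
ls
EOF
```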
# Documentation
......
@@ -9,7 +9,12 @@ services:
- type: bind
source: ./data/
target: /data/
ingestor:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor:dev
fluentd:
image: registry.gitlab.eox.at/esa/prism/vs/fluentd:dev
client:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_client:dev
ports:
- "80:80"
configs:
@@ -20,6 +25,7 @@ services:
source: ./data/
target: /data/
renderer:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:dev
ports:
- "81:80"
- "82:8080"
@@ -28,6 +34,7 @@ services:
source: ./data/
target: /data/
registrar:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:dev
volumes:
- type: bind
source: ./data/
@@ -36,6 +43,7 @@ services:
source: ./core/
target: /core/
cache:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_cache:dev
ports:
- "83:80"
volumes:
@@ -46,6 +54,7 @@ services:
- source: mapcache-dev
target: /mapcache-template.xml
preprocessor:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor:dev
volumes:
- type: tmpfs
target: /tmp
......
@@ -9,7 +9,12 @@ services:
- type: bind
source: ./data/
target: /data/
ingestor:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor:dev
fluentd:
image: registry.gitlab.eox.at/esa/prism/vs/fluentd:dev
client:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_client:dev
ports:
- "80:80"
configs:
@@ -20,6 +25,7 @@ services:
source: ./data/
target: /data/
renderer:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:dev
ports:
- "81:80"
- "82:8080"
@@ -28,6 +34,7 @@ services:
source: ./data/
target: /data/
registrar:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:dev
volumes:
- type: bind
source: ./data/
@@ -36,6 +43,7 @@ services:
source: ./core/
target: /core/
cache:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_cache:dev
ports:
- "83:80"
volumes:
@@ -46,6 +54,7 @@ services:
- source: mapcache-dev
target: /mapcache-template.xml
preprocessor:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor:dev
volumes:
- type: tmpfs
target: /tmp
......
@@ -9,7 +9,12 @@ services:
- type: bind
source: ./data/
target: /data/
ingestor:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor:dev
fluentd:
image: registry.gitlab.eox.at/esa/prism/vs/fluentd:dev
client:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_client:dev
ports:
- "80:80"
configs:
@@ -20,6 +25,7 @@ services:
source: ./data/
target: /data/
renderer:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:dev
ports:
- "81:80"
- "82:8080"
@@ -28,6 +34,7 @@ services:
source: ./data/
target: /data/
registrar:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:dev
volumes:
- type: bind
source: ./data/
@@ -36,6 +43,7 @@ services:
source: ./core/
target: /core/
cache:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_cache:dev
ports:
- "83:80"
volumes:
@@ -46,6 +54,7 @@ services:
- source: mapcache-dev
target: /mapcache-template.xml
preprocessor:
image: registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor:dev
volumes:
- type: tmpfs
target: /tmp
......
@@ -213,33 +213,6 @@ These are the internal access credentials for the database:
* ``DB_PORT``
* ``DB_NAME``
Sensitive variables
^^^^^^^^^^^^^^^^^^^
Since environment variables include credentials that are considered sensitive,
avoiding their exposure inside ``.env`` files would be the right practice.
In order to manage transmitting sensitive data securely into the respective containers,
docker secrets with the values of these variables should be created. Currently, three
variables have to be saved as docker secrets before deploying the swarm:
``OS_PASSWORD``, ``OS_PASSWORD_DOWNLOAD`` and ``DJANGO_PASSWORD``.
Two other docker secrets need to be created for traefik basic authentication:
``BASIC_AUTH_USERS_AUTH`` - used for access to services, ``BASIC_AUTH_USERS_APIAUTH`` - used for admin access to kibana and traefik.
These secrets should be text files containing a list of username:hashedpassword (MD5, SHA1, BCrypt) pairs.
An example of creating ``OS_PASSWORD`` as secret using the following command :
.. code-block:: bash
printf "<password_value>" | docker secret create OS_PASSWORD -
An example of creating ``BASIC_AUTH_USERS_AUTH`` secret:
.. code-block:: bash
htpasswd -nb user1 3vYxfRqUx4H2ar3fsEOR95M30eNJne >> auth_list.txt
htpasswd -nb user2 YyuN9bYRvBUUU6COx7itWw5qyyARus >> auth_list.txt
docker secret create BASIC_AUTH_USERS_AUTH auth_list.txt
Configuration Files
-------------------
@@ -520,5 +493,43 @@ preprocessing
define specific step settings, even overriding the values from the
defaults.
Sensitive variables
^^^^^^^^^^^^^^^^^^^
Since environment variables include credentials that are considered sensitive,
avoiding their exposure inside ``.env`` files would be the right practice.
In order to manage transmitting sensitive data securely into the respective containers,
docker secrets with the values of these variables should be created. Currently, three
variables have to be saved as docker secrets before deploying the swarm:
``OS_PASSWORD``, ``OS_PASSWORD_DOWNLOAD`` and ``DJANGO_PASSWORD``.
Two other docker secrets need to be created for traefik basic authentication:
``BASIC_AUTH_USERS_AUTH`` - used for access to services, ``BASIC_AUTH_USERS_APIAUTH`` - used for admin access to kibana and traefik.
These secrets should be text files containing a list of username:hashedpassword (MD5, SHA1, BCrypt) pairs.
Additionally, the configuration of the ``sftp`` image contains sensitive information and is therefore created using docker configs.
An example of creating the ``sftp`` configuration:
.. code-block:: bash

   printf "<user>:<password>:<UID>:<GID>" | docker config create sftp-users -
An example of creating ``OS_PASSWORD`` as a secret:
.. code-block:: bash

   printf "<password_value>" | docker secret create OS_PASSWORD -
An example of creating the ``BASIC_AUTH_USERS_AUTH`` secret:
.. code-block:: bash

   htpasswd -nb user1 3vYxfRqUx4H2ar3fsEOR95M30eNJne >> auth_list.txt
   htpasswd -nb user2 YyuN9bYRvBUUU6COx7itWw5qyyARus >> auth_list.txt
   docker secret create BASIC_AUTH_USERS_AUTH auth_list.txt
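To verify that everything is in place before deploying, the names (never the values) of the existing secrets and configs can be listed:

.. code-block:: bash

   docker secret ls   # should include OS_PASSWORD, OS_PASSWORD_DOWNLOAD, DJANGO_PASSWORD and the basic auth secrets
   docker config ls   # should include sftp-users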
The next section :ref:`management` describes how an operator interacts with a
deployed VS stack.
@@ -59,6 +59,21 @@ A new deployment of the stack will use the updated configuration. The above
mentioned process necessarily involves a certain service downtime between shutting down the stack and the new deployment.
Inspecting reports
------------------
Once a product is registered, an XML report containing the WCS and WMS GetCapabilities of the registered product is generated and can be accessed via sftp on the `SFTP` image.
To log into the logging folders through port 2222 on the hosting IP (e.g. localhost if you are running the dev stack), the following command can be used:
.. code-block:: bash

   sftp -P 2222 <username>@<host>
This will direct the user into the `/home/<username>/data` directory, which contains the 2 logging directories: `to/panda` and `from/fepd`.
.. Note:: The mounted directory that the user is directed into is *`/home/user`*, where `user` is the username; hence, when changing the username in the `.conf` file, the `sftp` mounted volume paths in `docker-compose.<collection>.yml` must be changed accordingly.
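A report can also be downloaded non-interactively in a single call; a sketch, where the report file name is purely illustrative:

.. code-block:: bash

   # <report>.xml stands for an actual report file name listed via sftp
   sftp -P 2222 <username>@<host>:to/panda/<report>.xml .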
Inspecting logs
---------------
......
import csv
from urllib.parse import unquote
from xml.etree import ElementTree

import pytest
import requests
from osgeo import gdal


@pytest.fixture
def identifiers():
    with open('./product_list.csv') as f:
        yield csv.reader(f)


def get_requests(url, service_list, service, request):
    # parse the OpenSearch atom feed and collect the hrefs of the requested operation
    response = requests.get(url=url)
    catalog = ElementTree.fromstring(response.content)
    entries = catalog.findall('{http://www.w3.org/2005/Atom}entry')
    for entry in entries:
        offers = entry.findall('{http://www.opengis.net/owc/1.0}offering[@code="http://www.opengis.net/spec/owc-atom/1.0/req/%s"]' % service)
        for offer in offers:
            services = offer.findall('{http://www.opengis.net/owc/1.0}operation[@code="%s"]' % request)
            if len(services) > 0:
                service_list.append(services[0].get('href'))
    return service_list


def test_renderer(identifiers):
    wms_items = get_requests('http://docker:81/opensearch/collections/Emergency/atom/', [], 'wms', 'GetMap')
    for row in identifiers:
        identifier = row[0].split('/')[4]
        for item in wms_items:
            if identifier in unquote(unquote(item)):
                wms_response = requests.get(url=item)
                # wms success
                assert wms_response.status_code == 200


def test_wcs(identifiers):
    wcs_items = get_requests('http://docker:81/opensearch/collections/Emergency/atom/', [], 'wcs', 'GetCoverage')
    for row in identifiers:
        identifier = row[0].split('/')[4]
        for item in wcs_items:
            if identifier in unquote(unquote(item)):
                wcs_response = requests.get(url=item + '&scalesize=x(50),y(50)')
                data = wcs_response.content
                with open('temp.tif', 'wb') as f:
                    f.write(data)
                # open the downloaded coverage and verify it contains actual raster data
                image = gdal.Open('temp.tif', gdal.GA_ReadOnly)
                srcband = image.GetRasterBand(1)
                # wcs success
                assert wcs_response.status_code == 200
                assert srcband.Checksum() is not None
                assert srcband.Checksum() > 0
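In CI these tests are invoked through `./gitlab_test.sh` (see the pipeline changes above). To run them by hand, a sketch like the following should work, assuming the stack is up and `product_list.csv` sits next to the test file (the file name here is a placeholder):

```bash
cd testing
python3 -m pytest <this_test_file>.py -v
```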