diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 589d1e4ac6a4401bca0e7844f8c10d529c44feca..658937aad7216c60be89e3761c8e8b82192f273b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -13,36 +13,44 @@ build-master:
   before_script:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
   script:
-    - VERSION=`grep 'version="*"' core/Dockerfile | cut -d '"' -f2`
-    - IMAGE="$CI_REGISTRY_IMAGE/pvs_core"
-    - docker pull "$IMAGE":latest || true
-    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":latest -t "$IMAGE":$VERSION core/
-    - docker push "$IMAGE":$VERSION
-    - docker push "$IMAGE":latest
-    - VERSION=`grep 'version="*"' preprocessor/Dockerfile | cut -d '"' -f2`
-    - IMAGE="$CI_REGISTRY_IMAGE/pvs_preprocessor"
-    - docker pull "$IMAGE":latest || true
-    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":latest -t "$IMAGE":$VERSION preprocessor/
-    - docker push "$IMAGE":$VERSION
-    - docker push "$IMAGE":latest
-    - VERSION=`grep 'version="*"' client/Dockerfile | cut -d '"' -f2`
-    - IMAGE="$CI_REGISTRY_IMAGE/pvs_client"
-    - docker pull "$IMAGE":latest || true
-    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":latest -t "$IMAGE":$VERSION client/
-    - docker push "$IMAGE":$VERSION
-    - docker push "$IMAGE":latest
-    - VERSION=`grep 'version="*"' cache/Dockerfile | cut -d '"' -f2`
-    - IMAGE="$CI_REGISTRY_IMAGE/pvs_cache"
-    - docker pull "$IMAGE":latest || true
-    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":latest -t "$IMAGE":$VERSION cache/
-    - docker push "$IMAGE":$VERSION
-    - docker push "$IMAGE":latest
-    - VERSION=`grep 'version="*"' ingestor/Dockerfile | cut -d '"' -f2`
-    - IMAGE="$CI_REGISTRY_IMAGE/pvs_ingestor"
-    - docker pull "$IMAGE":latest || true
-    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":latest -t "$IMAGE":$VERSION ingestor/
-    - docker push "$IMAGE":$VERSION
-    - docker push "$IMAGE":latest
+    - VERSION_1=`grep 'version="*"' core/Dockerfile | cut -d '"' -f2`
+    - IMAGE_1="$CI_REGISTRY_IMAGE/pvs_core"
+    - docker pull "$IMAGE_1":latest || true
+    - docker build --cache-from "$IMAGE_1":latest -t "$IMAGE_1":latest -t "$IMAGE_1":$VERSION_1 core/
+    - VERSION_2=`grep 'version="*"' preprocessor/Dockerfile | cut -d '"' -f2`
+    - IMAGE_2="$CI_REGISTRY_IMAGE/pvs_preprocessor"
+    - docker pull "$IMAGE_2":latest || true
+    - docker build --cache-from "$IMAGE_2":latest -t "$IMAGE_2":latest -t "$IMAGE_2":$VERSION_2 preprocessor/
+    - VERSION_3=`grep 'version="*"' client/Dockerfile | cut -d '"' -f2`
+    - IMAGE_3="$CI_REGISTRY_IMAGE/pvs_client"
+    - docker pull "$IMAGE_3":latest || true
+    - docker build --cache-from "$IMAGE_3":latest -t "$IMAGE_3":latest -t "$IMAGE_3":$VERSION_3 client/
+    - VERSION_4=`grep 'version="*"' cache/Dockerfile | cut -d '"' -f2`
+    - IMAGE_4="$CI_REGISTRY_IMAGE/pvs_cache"
+    - docker pull "$IMAGE_4":latest || true
+    - docker build --cache-from "$IMAGE_4":latest -t "$IMAGE_4":latest -t "$IMAGE_4":$VERSION_4 cache/
+    - VERSION_5=`grep 'version="*"' fluentd/Dockerfile | cut -d '"' -f2`
+    - IMAGE_5="$CI_REGISTRY_IMAGE/fluentd"
+    - docker pull "$IMAGE_5":latest || true
+    - docker build --cache-from "$IMAGE_5":latest -t "$IMAGE_5":latest -t "$IMAGE_5":$VERSION_5 fluentd/
+    - VERSION_6=`grep 'version="*"' ingestor/Dockerfile | cut -d '"' -f2`
+    - IMAGE_6="$CI_REGISTRY_IMAGE/pvs_ingestor"
+    - docker pull "$IMAGE_6":latest || true
+    - docker build --cache-from "$IMAGE_6":latest -t "$IMAGE_6":latest -t "$IMAGE_6":$VERSION_6 ingestor/
+    - cd ./testing && ./gitlab_test.sh && cd -
+    - docker push "$IMAGE_1":$VERSION_1
+    - docker push "$IMAGE_1":latest
+    - docker push "$IMAGE_2":$VERSION_2
+    - docker push "$IMAGE_2":latest
+    - docker push "$IMAGE_3":$VERSION_3
+    - docker push "$IMAGE_3":latest
+    - docker push "$IMAGE_4":$VERSION_4
+    - docker push "$IMAGE_4":latest
+    - docker push "$IMAGE_5":$VERSION_5
+    - docker push "$IMAGE_5":latest
+    - docker push "$IMAGE_6":$VERSION_6
+    - docker push "$IMAGE_6":latest
   only:
     - master
 
@@ -55,27 +62,30 @@ build:
     - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
   script:
     - IMAGE="$CI_REGISTRY_IMAGE/pvs_core"
-    - docker pull "$IMAGE":$CI_COMMIT_REF_NAME || true
-    - docker build --cache-from "$IMAGE":$CI_COMMIT_REF_NAME -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME core/
+    - docker pull "$IMAGE":latest || true
+    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME core/
     - docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
     - IMAGE="$CI_REGISTRY_IMAGE/pvs_preprocessor"
-    - docker pull "$IMAGE":$CI_COMMIT_REF_NAME || true
-    - docker build --cache-from "$IMAGE":$CI_COMMIT_REF_NAME -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME preprocessor/
+    - docker pull "$IMAGE":latest || true
+    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME preprocessor/
     - docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
     - IMAGE="$CI_REGISTRY_IMAGE/pvs_client"
-    - docker pull "$IMAGE":$CI_COMMIT_REF_NAME || true
-    - docker build --cache-from "$IMAGE":$CI_COMMIT_REF_NAME -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME client/
+    - docker pull "$IMAGE":latest || true
+    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME client/
     - docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
     - IMAGE="$CI_REGISTRY_IMAGE/pvs_cache"
-    - docker pull "$IMAGE":$CI_COMMIT_REF_NAME || true
-    - docker build --cache-from "$IMAGE":$CI_COMMIT_REF_NAME -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME cache/
-    - docker push "$IMAGE:$CI_COMMIT_REF_SLUG"
-    - docker push "$IMAGE:$CI_COMMIT_REF_NAME"
+    - docker pull "$IMAGE":latest || true
+    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME cache/
+    - docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
+    - IMAGE="$CI_REGISTRY_IMAGE/fluentd"
+    - docker pull "$IMAGE":latest || true
+    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME fluentd/
+    - docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
     - IMAGE="$CI_REGISTRY_IMAGE/pvs_ingestor"
     - docker pull "$IMAGE":$CI_COMMIT_REF_NAME || true
-    - docker build --cache-from "$IMAGE":$CI_COMMIT_REF_NAME -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME ingestor/
-    - docker push "$IMAGE:$CI_COMMIT_REF_SLUG"
-    - docker push "$IMAGE:$CI_COMMIT_REF_NAME"
+    - docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME ingestor/
+    - cd ./testing && ./gitlab_test.sh && cd -
   except:
     - master
-  
+
diff --git a/README.md b/README.md
index 4156a8f6855c64c0da5da3297f510fb426888b83..a50a21e7080e757ec3634d84069e5dabaebaef34 100644
--- a/README.md
+++ b/README.md
@@ -170,7 +170,9 @@ docker build core/ --cache-from registry.gitlab.eox.at/esa/prism/vs/pvs_core -t
 docker build cache/ --cache-from registry.gitlab.eox.at/esa/prism/vs/pvs_cache -t registry.gitlab.eox.at/esa/prism/vs/pvs_cache
 docker build preprocessor/ --cache-from registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor -t registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor
 docker build client/ --cache-from registry.gitlab.eox.at/esa/prism/vs/pvs_client -t registry.gitlab.eox.at/esa/prism/vs/pvs_client
+docker build fluentd/ --cache-from registry.gitlab.eox.at/esa/prism/vs/fluentd -t registry.gitlab.eox.at/esa/prism/vs/fluentd
 docker build ingestor/ --cache-from registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor -t registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor
+
 ```
 Or pull them from the registry:
 ```
@@ -179,18 +181,27 @@ docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_core
 docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_cache
 docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor
 docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_client
+docker pull registry.gitlab.eox.at/esa/prism/vs/fluentd
+```
+Create the external networks needed for the stacks to run:
+```
+docker network create -d overlay vhr18-extnet
+docker network create -d overlay emg-extnet
 ```
+Add the following `.env` files containing the credentials to the `env/` folder of the cloned copy of the repository: `vhr18_db.env`, `vhr18_obs.env`, `vhr18_django.env` (plus the corresponding `emg_*.env` files for the Emergency stack).
 
 Deploy the stack:
 ```
-docker stack deploy -c docker-compose.vhr18.yml -c docker-compose.vhr18.dev.yml vhr18-pvs  # start VHR_IMAGE_2018 stack in dev mode, for example to use local sources
-docker stack deploy -c docker-compose.emg.yml -c docker-compose.emg.dev.yml emg-pvs  # start Emergency stack in dev mode, for example to use local sources
-
-docker exec -it $(docker ps -qf "name=vhr18-pvs_renderer") /bin/bash
-cd /var/www/pvs/dev/pvs_instance
-python manage.py runserver 0.0.0.0:8080
+docker stack deploy -c docker-compose.vhr18.yml -c docker-compose.vhr18.dev.yml -c docker-compose.logging.yml -c docker-compose.logging.dev.yml vhr18-pvs  # start VHR_IMAGE_2018 stack in dev mode, for example to use local sources
+docker stack deploy -c docker-compose.emg.yml -c docker-compose.emg.dev.yml -c docker-compose.logging.yml -c docker-compose.logging.dev.yml emg-pvs  # start Emergency stack in dev mode, for example to use local sources
+```
+First steps:
 ```
+# To register the first data, use the following command inside the registrar container:
+UPLOAD_CONTAINER=<product_bucket_name> python3 registrar.py --objects-prefix <product_object_storage_item_prefix>
+# To see the catalog opensearch response in the attached web client, a browser CORS extension needs to be enabled.
 
+```
 Tear down the stack including data:
 
 ```bash
@@ -201,6 +212,22 @@ docker volume rm vhr18-pvs_traefik-data
 docker volume rm vhr18-pvs_instance-data
 ```
 
+### Setup logging
+
+To access the logs, navigate to http://localhost:5601. Ignore the fancy enterprise capabilities and select Kibana > Discover in the hamburger menu.
+
+On the first run, you need to define an index pattern that selects the data source for Kibana in Elasticsearch.
+Since fluentd is the only source writing to Elasticsearch, you can simply use `*` as the index pattern.
+Select `@timestamp` as the time field
+([see also](https://www.elastic.co/guide/en/kibana/current/tutorial-define-index.html)).
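+Alternatively, the index pattern can be created via Kibana's saved objects API (a minimal sketch, assuming the default dev setup on localhost without authentication):
+```
+curl -X POST "http://localhost:5601/api/saved_objects/index-pattern" \
+  -H "kbn-xsrf: true" -H "Content-Type: application/json" \
+  -d '{"attributes": {"title": "*", "timeFieldName": "@timestamp"}}'
+```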
+
+
 # Documentation
 
 ## Installation
diff --git a/cache/run-httpd.sh b/cache/run-httpd.sh
index 0f0567a836253fbfda02e5ec5914bf8505ef7449..fc3cd5dd5012ad5cb20f597abb5c3e9b920a0e54 100755
--- a/cache/run-httpd.sh
+++ b/cache/run-httpd.sh
@@ -1,7 +1,7 @@
 #!/bin/bash -e
 
-/configure.sh
+/configure.sh >&2
 
-echo "Running Apache server"
+echo "Running Apache server" >&2
 rm -rf /run/apache2/* /var/run/apache2/* /tmp/apache2*
 exec /usr/sbin/apache2ctl -D FOREGROUND
diff --git a/config/emg_init-db.sh b/config/emg_init-db.sh
index 73f9c1129b1767c66ccd09c6e920e143190b9328..5944a22d495d77b97a2b2e7dfa9cee068742c984 100644
--- a/config/emg_init-db.sh
+++ b/config/emg_init-db.sh
@@ -92,7 +92,7 @@ if python3 manage.py id check "${COLLECTION}"; then
             --blue-range 0 255 \
             --red-nodata 0 \
             --green-nodata 0 \
-            --blue-nodata 0        
+            --blue-nodata 0
         # EQ02_3
         python3 manage.py producttype create "${COLLECTION}"_Product_EQ02_3 --traceback \
             --coverage-type "RGB"
@@ -125,7 +125,7 @@ if python3 manage.py id check "${COLLECTION}"; then
             --blue-range 0 500 \
             --red-nodata 0 \
             --green-nodata 0 \
-            --blue-nodata 0  
+            --blue-nodata 0
         # EQ02_4
         python3 manage.py producttype create "${COLLECTION}"_Product_EQ02_4 --traceback \
             --coverage-type "RGBNir"
@@ -204,7 +204,7 @@ if python3 manage.py id check "${COLLECTION}"; then
             --blue-range 0 800 \
             --red-nodata 0 \
             --green-nodata 0 \
-            --blue-nodata 0  
+            --blue-nodata 0
         # EW02_4
         python3 manage.py producttype create "${COLLECTION}"_Product_EW02_4 --traceback \
             --coverage-type "RGBNir"
@@ -307,7 +307,7 @@ if python3 manage.py id check "${COLLECTION}"; then
             --blue-range 0 800 \
             --red-nodata 0 \
             --green-nodata 0 \
-            --blue-nodata 0  
+            --blue-nodata 0
         # EW03_4
         python3 manage.py producttype create "${COLLECTION}"_Product_EW03_4 --traceback \
             --coverage-type "RGBNir"
@@ -851,19 +851,6 @@ if python3 manage.py id check "${COLLECTION}"; then
         python3 manage.py browsetype create "${COLLECTION}"_Product_SP07 "NDVI" --traceback \
             --grey "(nir-red)/(nir+red)" --grey-range -1 1
         # PH1A
-        python3 manage.py producttype create "${COLLECTION}"_Product_PH1A --traceback \
-            --coverage-type "RGBNir"
-        python3 manage.py browsetype create "${COLLECTION}"_Product_PH1A  --traceback \
-            --red "red" \
-            --green "green" \
-            --blue "blue" \
-            --red-range 1 1000 \
-            --green-range 1 1000 \
-            --blue-range 1 1000 \
-            --red-nodata 0 \
-            --green-nodata 0 \
-            --blue-nodata 0
-        # PH1A
         python3 manage.py producttype create "${COLLECTION}"_Product_PH1A --traceback \
             --coverage-type "RGBNir"
         python3 manage.py browsetype create "${COLLECTION}"_Product_PH1A  --traceback \
diff --git a/core/configure.sh b/core/configure.sh
index adbc5dade19769f530bda038ac0385250fbf350c..3966e886e6f9e09c4c1a41c3fe2b1542466db9e5 100644
--- a/core/configure.sh
+++ b/core/configure.sh
@@ -1,52 +1,54 @@
 #!/bin/bash
-echo "Running configure.sh"
-
-# Configure instance
-sed -e "s/'disable_existing_loggers': True,/'disable_existing_loggers': False,/" -i pvs_instance/settings.py
-HANDLERS="'handlers': {\n
-    'null': {\n
-        'level':'DEBUG',\n
-        'class':'logging.NullHandler',\n
-    },\n
-    'console': {\n
-        'level': 'DEBUG' if DEBUG else 'INFO',\n
-        'class': 'logging.StreamHandler',\n
-        'formatter': 'verbose' if DEBUG else 'simple',\n
-        'filters': [],\n
-    },\n
-},"
-LOGGERS="'loggers': {\n
-    'eoxserver': {\n
-        'handlers': ['console'],\n
-        'level': 'DEBUG' if DEBUG else 'INFO',\n
-        'propagate': False,\n
-    },
-}"
-sed -e "/^    'handlers': {$/,/^    },$/c `echo ${HANDLERS}`" -i pvs_instance/settings.py
-sed -e "/^    'loggers': {$/,/^    }$/c `echo ${LOGGERS}`" -i pvs_instance/settings.py
-
-sed -e "s,http_service_url=http://localhost:8000/ows,http_service_url=${APACHE_ALIAS}/ows," -i pvs_instance/conf/eoxserver.conf
-sed -e "s/resampling_method=average/resampling_method=near/" -i pvs_instance/conf/eoxserver.conf
-# TODO maxsize...
-
-echo "EOXS_VALIDATE_IDS_NCNAME = False" >> pvs_instance/settings.py
-echo "EOXS_OPENSEARCH_RECORD_MODEL = 'eoxserver.resources.coverages.models.Product'" >> pvs_instance/settings.py
-
-echo "CACHES = {
-'default': {
-    'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
-    'LOCATION': '/var/tmp/django_cache',
-}
-}" >> pvs_instance/settings.py
-
-sed -e "/TEMPLATE_DEBUG = DEBUG/d" -i pvs_instance/settings.py
-
-sed -e 's/DEBUG = True/DEBUG = False/' -i pvs_instance/settings.py
-
-# Further configuration
-echo "ALLOWED_HOSTS = ['*']" >> pvs_instance/settings.py
-echo "USE_X_FORWARDED_HOST = True" >> pvs_instance/settings.py
-echo "SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')" >> pvs_instance/settings.py
-
-chmod g+w -R .
-chgrp users -R .
\ No newline at end of file
+{
+    echo "Running configure.sh"
+
+    # Configure instance
+    sed -e "s/'disable_existing_loggers': True,/'disable_existing_loggers': False,/" -i pvs_instance/settings.py
+    HANDLERS="'handlers': {\n
+        'null': {\n
+            'level':'DEBUG',\n
+            'class':'logging.NullHandler',\n
+        },\n
+        'console': {\n
+            'level': 'DEBUG' if DEBUG else 'INFO',\n
+            'class': 'logging.StreamHandler',\n
+            'formatter': 'verbose' if DEBUG else 'simple',\n
+            'filters': [],\n
+        },\n
+    },"
+    LOGGERS="'loggers': {\n
+        'eoxserver': {\n
+            'handlers': ['console'],\n
+            'level': 'DEBUG' if DEBUG else 'INFO',\n
+            'propagate': False,\n
+        },
+    }"
+    sed -e "/^    'handlers': {$/,/^    },$/c `echo ${HANDLERS}`" -i pvs_instance/settings.py
+    sed -e "/^    'loggers': {$/,/^    }$/c `echo ${LOGGERS}`" -i pvs_instance/settings.py
+
+    sed -e "s,http_service_url=http://localhost:8000/ows,http_service_url=${APACHE_ALIAS}/ows," -i pvs_instance/conf/eoxserver.conf
+    sed -e "s/resampling_method=average/resampling_method=near/" -i pvs_instance/conf/eoxserver.conf
+    # TODO maxsize...
+
+    echo "EOXS_VALIDATE_IDS_NCNAME = False" >> pvs_instance/settings.py
+    echo "EOXS_OPENSEARCH_RECORD_MODEL = 'eoxserver.resources.coverages.models.Product'" >> pvs_instance/settings.py
+
+    echo "CACHES = {
+    'default': {
+        'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
+        'LOCATION': '/var/tmp/django_cache',
+    }
+    }" >> pvs_instance/settings.py
+
+    sed -e "/TEMPLATE_DEBUG = DEBUG/d" -i pvs_instance/settings.py
+
+    sed -e 's/DEBUG = True/DEBUG = False/' -i pvs_instance/settings.py
+
+    # Further configuration
+    echo "ALLOWED_HOSTS = ['*']" >> pvs_instance/settings.py
+    echo "USE_X_FORWARDED_HOST = True" >> pvs_instance/settings.py
+    echo "SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')" >> pvs_instance/settings.py
+
+    chmod g+w -R .
+    chgrp users -R .
+} 1>&2
diff --git a/core/entrypoint.sh b/core/entrypoint.sh
index e78b1330d926b5936575619ce36509e11d4d08b3..a8e57f88358df480cba3f7e99640f455eb84f427 100644
--- a/core/entrypoint.sh
+++ b/core/entrypoint.sh
@@ -5,7 +5,7 @@ TIMEOUT=${WAIT_TIMEOUT:='15'}
 
 if [[ ! -z $SERVICES ]] ; then
     for service in $SERVICES ; do
-        wait-for-it -t $TIMEOUT $service
+        wait-for-it -t $TIMEOUT $service >&2
     done
 fi
 
diff --git a/core/initialized.sh b/core/initialized.sh
index 8fc6714bda7f168a3a5807588463805987da1d2e..07fc196a6d0925e93e78227c33bfef0b53635832 100644
--- a/core/initialized.sh
+++ b/core/initialized.sh
@@ -1,4 +1,4 @@
 #!/bin/bash -e
 
 touch "${INSTANCE_DIR}/.initialized"
-echo "Instance ${INSTANCE_ID} is initialized"
+echo "Instance ${INSTANCE_ID} is initialized" >&2
diff --git a/core/run-httpd.sh b/core/run-httpd.sh
index 876e67b1b681f894bc014c9517e7aa7b3633d492..44498e3c1f6338dd0ecb4a49941038e544f44a49 100644
--- a/core/run-httpd.sh
+++ b/core/run-httpd.sh
@@ -2,4 +2,4 @@
 
 
 echo "Running gunicorn"
-exec gunicorn --chdir ${INSTALL_DIR}/pvs_instance/ --bind :80 pvs_instance.wsgi:application --workers 8 --max-requests 10 --max-requests-jitter 3 --worker-class sync --timeout 120 --access-logfile - --error-logfile - --log-level warning --disable-redirect-access-to-syslog
+exec gunicorn --chdir ${INSTALL_DIR}/pvs_instance/ --bind :80 pvs_instance.wsgi:application --workers 8 --max-requests 10 --max-requests-jitter 3 --worker-class sync --timeout 120 --access-logfile - --error-logfile - --log-level warning --disable-redirect-access-to-syslog 2>&1
diff --git a/core/run-registrar.sh b/core/run-registrar.sh
index a3aeb0066d538def8337ae534f49c9ac68af6b65..b75d6f9c9354d633d5a3ced6be095c61d4b04ab8 100644
--- a/core/run-registrar.sh
+++ b/core/run-registrar.sh
@@ -1,9 +1,9 @@
 #!/bin/sh
 
 echo "Running registrar"
 replace=""
 if test "$REGISTRAR_REPLACE" = true; then
     replace="--replace"
 fi
 
-python3 /registrar.py --mode redis --redis-host ${REDIS_HOST} --redis-port ${REDIS_PORT} --redis-register-queue-key ${REDIS_REGISTER_QUEUE_KEY} --redis-registered-set-key ${REDIS_REGISTERED_SET_KEY} ${replace}
+python3 /registrar.py --mode redis --redis-host ${REDIS_HOST} --redis-port ${REDIS_PORT} --redis-register-queue-key ${REDIS_REGISTER_QUEUE_KEY} --redis-registered-set-key ${REDIS_REGISTERED_SET_KEY} ${replace} >&2
diff --git a/core/wait-initialized.sh b/core/wait-initialized.sh
index bfefbc12868fcc0b52a52748df694b38961447b5..95afa5bde5cc5642492125f53e914d0d8ddfa9a1 100644
--- a/core/wait-initialized.sh
+++ b/core/wait-initialized.sh
@@ -1,7 +1,7 @@
 #!/bin/bash -e
 
 until [ -f "${INSTANCE_DIR}/.initialized" ] ; do
-    echo "Waiting until instance ${INSTANCE_ID} is initialized"
+    echo "Waiting until instance ${INSTANCE_ID} is initialized" >&2
     sleep 3
     # TODO: timeout?
 done
diff --git a/data/.gitkeep b/data/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docker-compose.base.ops.yml b/docker-compose.base.ops.yml
index f9b2e18a7aff70b874dd1132e51208e3e816e7ce..f29bcc7cd01abe7e88f0e64f5bbed23858b909be 100644
--- a/docker-compose.base.ops.yml
+++ b/docker-compose.base.ops.yml
@@ -27,6 +27,7 @@ services:
       - vhr18-extnet
       - emg-extnet
       - dem-extnet
+      - logging-extnet
 volumes:
   traefik-data:
 networks:
@@ -36,3 +37,5 @@ networks:
     name: emg-extnet
   dem-extnet:
     name: dem-extnet
+  logging-extnet:
+    name: logging-extnet
diff --git a/docker-compose.dem.dev.yml b/docker-compose.dem.dev.yml
index 33f14fbf382bf2a078f5ebca9ba6eaede5853cb1..8ac49a6c078ec3cdc21e82686f8aee3543ea070f 100644
--- a/docker-compose.dem.dev.yml
+++ b/docker-compose.dem.dev.yml
@@ -5,12 +5,20 @@ services:
       - extnet
     ports:
       - "5432:5432"
+    volumes:
+      - type: bind
+        source: ./data/
+        target: /data/
   client:
     ports:
       - "80:80"
     configs:
       - source: client-dev
         target: /usr/share/nginx/html/index.html
+    volumes:
+      - type: bind
+        source: ./data/
+        target: /data/
   renderer:
     ports:
       - "81:80"
@@ -27,6 +35,8 @@ services:
       - type: bind
         source: ./core/
         target: /core/
+    logging:
+      driver: "fluentd"
   cache:
     ports:
       - "83:80"
@@ -44,6 +54,9 @@ services:
       - type: bind
         source: ./preprocessor/
         target: /preprocessor/
+      - type: bind
+        source: ./data/
+        target: /data/
 networks:
   extnet:
     name: dem-extnet
diff --git a/docker-compose.emg.dev.yml b/docker-compose.emg.dev.yml
index 5739969a94825b231f082fddc4a52fd7bc67356f..af436d2e83c8e6a65fe69ad5473896b4ec523bd1 100644
--- a/docker-compose.emg.dev.yml
+++ b/docker-compose.emg.dev.yml
@@ -4,13 +4,21 @@ services:
     networks:
       - extnet
     ports:
-      - "5432:5432"  
+      - "5432:5432"
+    volumes:
+      - type: bind
+        source: ./data/
+        target: /data/
   client:
     ports:
       - "80:80"
     configs:
       - source: client-dev
         target: /usr/share/nginx/html/index.html
+    volumes:
+      - type: bind
+        source: ./data/
+        target: /data/
   renderer:
     ports:
       - "81:80"
@@ -44,6 +52,9 @@ services:
       - type: bind
         source: ./preprocessor/
         target: /preprocessor/
+      - type: bind
+        source: ./data/
+        target: /data/
 networks:
   extnet:
     name: emg-extnet
diff --git a/docker-compose.logging.dev.yml b/docker-compose.logging.dev.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d749cb97edd584b85c8bbe46b708d06e2653ee1f
--- /dev/null
+++ b/docker-compose.logging.dev.yml
@@ -0,0 +1,35 @@
+version: "3.6"
+services:
+  elasticsearch:
+    ports:
+      - "9200:9200"
+    environment:
+      - bootstrap.memory_lock=true
+      - "ES_JAVA_OPTS=-Xms256m -Xmx256m"
+      - "discovery.type=single-node"
+    deploy:
+      resources:
+        limits:
+          memory: 500M
+  database:
+    logging:
+      driver: "fluentd"
+  client:
+    logging:
+      driver: "fluentd"
+  renderer:
+    logging:
+      driver: "fluentd"
+  registrar:
+    logging:
+      driver: "fluentd"
+  cache:
+    logging:
+      driver: "fluentd"
+  preprocessor:
+    logging:
+      driver: "fluentd"
+  kibana:
+    ports:
+      - "5601:5601"
diff --git a/docker-compose.logging.ops.yml b/docker-compose.logging.ops.yml
new file mode 100644
index 0000000000000000000000000000000000000000..53434247c97cc5ba62880d1ca8eafc2a81731d0c
--- /dev/null
+++ b/docker-compose.logging.ops.yml
@@ -0,0 +1,48 @@
+version: "3.6"
+services:
+  fluentd:
+    deploy:
+      placement:
+        # this is not strictly required, but feels right
+        constraints: [node.role == manager]
+
+  elasticsearch:
+    environment:
+      - bootstrap.memory_lock=true
+      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
+      - "discovery.type=single-node"
+    ulimits:
+      memlock:
+        soft: -1
+        hard: -1
+      nofile:
+        soft: 65536
+        hard: 65536
+    deploy:
+      placement:
+        constraints: [node.role == manager]
+      resources:
+        limits:
+          memory: 1000M
+  kibana:
+    deploy:
+      placement:
+        constraints: [node.role == manager]
+      labels:
+        # router for basic auth based access (https)
+        - "traefik.http.routers.kibana.rule=Host(`kibana.pdas.prism.eox.at`)"
+        - "traefik.http.routers.kibana.middlewares=apiauth@file,compress@file"
+        - "traefik.http.routers.kibana.tls=true"
+        - "traefik.http.routers.kibana.tls.certresolver=default"
+        - "traefik.http.routers.kibana.entrypoints=https"
+        - "traefik.http.services.kibana.loadbalancer.sticky=false"
+        - "traefik.http.services.kibana.loadbalancer.server.port=5601"
+        - "traefik.docker.network=logging-extnet"
+        - "traefik.docker.lbswarm=true"
+        - "traefik.enable=true"
+    networks:
+    - logging-extnet
+networks:
+  logging-extnet:
+    name: logging-extnet
+    external: true
diff --git a/docker-compose.logging.yml b/docker-compose.logging.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0eafaa5ebcc1751dd1e1dc684eec98477eb7cd17
--- /dev/null
+++ b/docker-compose.logging.yml
@@ -0,0 +1,50 @@
+version: "3.6"
+services:
+  fluentd:
+    image: registry.gitlab.eox.at/esa/prism/vs/fluentd:latest
+    configs:
+      - source: fluentd-conf
+        target: /fluentd/etc/fluent.conf
+    ports:
+      - "24224:24224"
+    networks:
+      - logging
+    deploy:
+      replicas: 1
+    logging:
+      # fluentd can't start logging to itself
+      driver: "json-file"
+  elasticsearch:
+    image: elasticsearch:7.9.0
+    networks:
+      - logging
+    environment:
+      - bootstrap.memory_lock=true
+      - "ES_JAVA_OPTS=-Xms256m -Xmx256m"
+      - "discovery.type=single-node"
+    deploy:
+      replicas: 1
+      resources:
+        limits:
+          memory: 500M
+    volumes:
+      - es-data:/usr/share/elasticsearch/data
+    logging:
+      # there are startup issues if ES tries to log to fluentd
+      driver: "json-file"
+  kibana:
+    image: kibana:7.9.0
+    logging:
+      driver: "fluentd"
+    networks:
+      - logging
+    deploy:
+      replicas: 1
+configs:
+  fluentd-conf:
+    file: ./fluentd/conf/fluent.conf
+volumes:
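+  # named volume so the elasticsearch indices survive stack re-deployments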
+  es-data:
+networks:
+  logging:
diff --git a/docker-compose.vhr18.dev.yml b/docker-compose.vhr18.dev.yml
index d48b1ea24aa7701ef8c05e7166bb64dbf737a8f4..e7c46c3f2348a66e5fa40df62c5c86dc5557d3de 100644
--- a/docker-compose.vhr18.dev.yml
+++ b/docker-compose.vhr18.dev.yml
@@ -4,13 +4,21 @@ services:
     networks:
       - extnet
     ports:
-      - "5432:5432"  
+      - "5432:5432"
+    volumes:
+      - type: bind
+        source: ./data/
+        target: /data/
   client:
     ports:
       - "80:80"
     configs:
       - source: client-dev
         target: /usr/share/nginx/html/index.html
+    volumes:
+      - type: bind
+        source: ./data/
+        target: /data/
   renderer:
     ports:
       - "81:80"
diff --git a/docker-compose.vhr18.yml b/docker-compose.vhr18.yml
index 836164a1b153b7411fc13aaec44d251efd57b24a..3dce84e5a89b704d4e014803a043dd48aa3af699 100644
--- a/docker-compose.vhr18.yml
+++ b/docker-compose.vhr18.yml
@@ -68,6 +68,9 @@ services:
       WAIT_TIMEOUT: 300  # wait up to 5 minutes
     deploy:
       replicas: 1
+    logging:
+      options:
+        tag: "docker.apache2"
     networks:
       - intnet
     command:
diff --git a/env_setup.sh b/env_setup.sh
deleted file mode 100644
index 9f2bb95bc7ffe5d2cb9e26b7676bf2c464ba304d..0000000000000000000000000000000000000000
--- a/env_setup.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-cat $vhr18_db > ./env/vhr18_db.env
-cat $vhr18_django > ./env/vhr18_django.env
-cat $vhr18_obs > ./env/vhr18_obs.env
-
-cat $emg_db > ./env/emg_db.env
-cat $emg_django > ./env/emg_django.env
-cat $emg_obs > ./env/emg_obs.env
-
-
-set -o allexport
-
-source ./env/emg_db.env
-source ./env/vhr18_db.env
-
-set +o allexport
-
-
-sed -i -e 's/emg-data/pvs_testing/g' ./env/emg.env
-sed -i -e 's/vhr18-data/pvs_testing/g' ./env/vhr18.env
-
-sed -i -e 's/emg-cache/pvs_testing/g' ./env/emg_obs.env
-sed -i -e 's/vhr18-cache/pvs_testing/g' ./env/vhr18_obs.env
diff --git a/fluentd/Dockerfile b/fluentd/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..fb2dfae9469b57e2afd689b7b9b28b7cdee3432c
--- /dev/null
+++ b/fluentd/Dockerfile
@@ -0,0 +1,40 @@
+#------------------------------------------------------------------------------
+#
+# Project: prism view server
+# Authors: Stephan Meissl <stephan.meissl@eox.at>
+#
+#------------------------------------------------------------------------------
+# Copyright (C) 2018 EOX IT Services GmbH <https://eox.at>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies of this Software or works derived from this Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#-----------------------------------------------------------------------------
+
+FROM fluent/fluentd:v1.11.2-debian-1.0
+
+MAINTAINER EOX
+LABEL name="prism view server cache" \
+      vendor="EOX IT Services GmbH <https://eox.at>" \
+      license="MIT Copyright (C) 2019 EOX IT Services GmbH <https://eox.at>" \
+      type="prism view server fluentd" \
+      version="0.0.1-dev"
+
+USER root
+RUN gem install fluent-plugin-elasticsearch \
+ && gem install fluent-plugin-rewrite-tag-filter
+USER fluent
diff --git a/fluentd/conf/fluent.conf b/fluentd/conf/fluent.conf
new file mode 100644
index 0000000000000000000000000000000000000000..41c2b8746ea74f9836a17213b75ba511a1328a97
--- /dev/null
+++ b/fluentd/conf/fluent.conf
@@ -0,0 +1,51 @@
+<system>
+  log_level debug
+</system>
+
+<source>
+  @type forward
+  port 24224
+  bind 0.0.0.0
+</source>
+
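+# prepend the record's "source" field (stdout/stderr) to the tag, e.g.
+# apache2 access logs written to stdout become "stdout.docker.apache2"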
+<match docker.apache2>
+  @type rewrite_tag_filter
+  <rule>
+    key source
+    pattern /^(.*)$/
+    tag $1.${tag}
+  </rule>
+</match>
+
+<filter stdout.docker.apache2>
+  @type parser
+  key_name log
+  reserve_data true
+  <parse>
+   @type regexp
+   # apache2 combined log format, with the request serve time appended as an extra field
+   expression /^(?<host>[^ ]*) [^ ]* (?<user>[^ ]*) \[(?<time>[^\]]*)\] "(?<method>\S+)(?: +(?<path>[^ ]*) +\S*)?" (?<code>[^ ]*) (?<size>[^ ]*)(?: "(?<referer>[^\"]*)" "(?<agent>[^\"]*)")? (?<request_serve_duration>[^ ]*)$/
+   time_format %d/%b/%Y:%H:%M:%S %z
+   types request_serve_duration:integer
+  </parse>
+</filter>
+
+<match *.**>
+  @type copy
+
+  <store>
+    @type elasticsearch
+    host elasticsearch
+    port 9200
+    logstash_format true
+    logstash_prefix fluentd
+    logstash_dateformat %Y%m%d
+    include_tag_key true
+    type_name access_log
+    tag_key @log_name
+    flush_interval 1s
+  </store>
+
+</match>
diff --git a/registrar_test.py b/registrar_test.py
deleted file mode 100644
index 7d2df6332b55589a3d6ad7f80b91937599ff35dd..0000000000000000000000000000000000000000
--- a/registrar_test.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import psycopg2
-import os
-import csv
-
-
-with open('./env/emg_db.env', 'r') as f:
-    env = dict(
-        line.split('=', 1)
-        for line in f
-    )
-database= env['DB_NAME'].replace('\n','')
-port = env['DB_PORT'].replace('\n','')
-host = env['DB_HOST'].replace('\n','')
-database_password= env['DB_PW'].replace('\n','')
-database_user = env['DB_USER'].replace('\n','')
-
-
-def connect_to_db(eo_id):
-    global db_name, coverage_id
-    connection= None
-    try:
-        connection = psycopg2.connect(dbname=database, user=database_user, password=database_password, host='docker', port=port)
-        cursor = connection.cursor()
-        db_name = connection.get_dsn_parameters()["dbname"]
-        postgreSQL_select_Query = "SELECT identifier FROM coverages_eoobject WHERE identifier = '%s';" % eo_id
-        cursor.execute(postgreSQL_select_Query)
-        coverage_id = cursor.fetchone()[0]
-
-    except (Exception, psycopg2.Error) as error :
-        print ("Error while connecting to PostgreSQL", error)
-    finally:
-        #closing database connection.
-        if connection:
-            cursor.close()
-            connection.close()
-            print("PostgreSQL connection is closed")
-                
-
-
-def test_db_name(name):
-    with open(name, newline='') as csvfile:
-        spamreader = csv.reader(csvfile)
-        for row in spamreader:
-            identifier = row[0].split('/')[4]
-            connect_to_db(identifier)
-            assert coverage_id == identifier
-            assert db_name == database
-
diff --git a/testing/docker-stack-wait.sh b/testing/docker-stack-wait.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c7f00a3199b8562cd7759e4f481709a306a9768a
--- /dev/null
+++ b/testing/docker-stack-wait.sh
@@ -0,0 +1,148 @@
+#!/bin/sh
+
+# By: Brandon Mitchell <public@bmitch.net>
+# License: MIT
+# Source repo: https://github.com/sudo-bmitch/docker-stack-wait
+
+set -e
+trap "{ exit 1; }" TERM INT
+opt_h=0
+opt_r=0
+opt_s=5
+opt_t=3600
+start_epoc=$(date +%s)
+
+usage() {
+  echo "$(basename $0) [opts] stack_name"
+  echo "  -f filter: only wait for services matching filter, may be passed multiple"
+  echo "             times, see docker stack services for the filter syntax"
+  echo "  -h:        this help message"
+  echo "  -n name:   only wait for specific service names, overrides any filters,"
+  echo "             may be passed multiple times, do not include the stack name prefix"
+  echo "  -r:        treat a rollback as successful"
+  echo "  -s sec:    frequency to poll service state (default $opt_s sec)"
+  echo "  -t sec:    timeout to stop waiting"
+  [ "$opt_h" = "1" ] && exit 0 || exit 1
+}
+check_timeout() {
+  # timeout when a timeout is defined and we will exceed the timeout after the
+  # next sleep completes
+  if [ "$opt_t" -gt 0 ]; then
+    cur_epoc=$(date +%s)
+    cutoff_epoc=$(expr ${start_epoc} + $opt_t - $opt_s)
+    if [ "$cur_epoc" -gt "$cutoff_epoc" ]; then
+      echo "Error: Timeout exceeded"
+      exit 1
+    fi
+  fi
+}
+get_service_ids() {
+  if [ -n "$opt_n" ]; then
+    service_list=""
+    for name in $opt_n; do
+      service_list="${service_list:+${service_list} }${stack_name}_${name}"
+    done
+    docker service inspect --format '{{.ID}}' ${service_list}
+  else
+    docker stack services ${opt_f} -q "${stack_name}"
+  fi
+}
+service_state() {
+  # output the state when it changes from the last state for the service
+  service=$1
+  # strip any invalid chars from service name for caching state
+  service_safe=$(echo "$service" | sed 's/[^A-Za-z0-9_]/_/g')
+  state=$2
+  if eval [ \"\$cache_${service_safe}\" != \"\$state\" ]; then
+    echo "Service $service state: $state"
+    eval cache_${service_safe}=\"\$state\"
+  fi
+}
+
+while getopts 'f:hn:rs:t:' opt; do
+  case $opt in
+    f) opt_f="${opt_f:+${opt_f} }-f $OPTARG";;
+    h) opt_h=1;;
+    n) opt_n="${opt_n:+${opt_n} } $OPTARG";;
+    r) opt_r=1;;
+    s) opt_s="$OPTARG";;
+    t) opt_t="$OPTARG";;
+  esac
+done
+shift $(expr $OPTIND - 1)
+
+if [ $# -ne 1 -o "$opt_h" = "1" -o "$opt_s" -le "0" ]; then
+  usage
+fi
+
+stack_name=$1
+
+# 0 = running, 1 = success, 2 = error
+stack_done=0
+while [ "$stack_done" != "1" ]; do
+  stack_done=1
+  # run get_service_ids outside of the for loop to catch errors
+  service_ids=$(get_service_ids)
+  for service_id in ${service_ids}; do
+    service_done=1
+    service=$(docker service inspect --format '{{.Spec.Name}}' "$service_id")
+
+    # hardcode a "new" state when UpdateStatus is not defined
+    state=$(docker service inspect -f '{{if .UpdateStatus}}{{.UpdateStatus.State}}{{else}}new{{end}}' "$service_id")
+
+    # check for failed update states
+    case "$state" in
+      paused|rollback_paused)
+        service_done=2
+        ;;
+      rollback_*)
+        if [ "$opt_r" = "0" ]; then
+          service_done=2
+        fi
+        ;;
+    esac
+
+    # identify/report current state
+    if [ "$service_done" != "2" ]; then
+      replicas=$(docker service ls --format '{{.Replicas}}' --filter "id=$service_id" | cut -d' ' -f1)
+      current=$(echo "$replicas" | cut -d/ -f1)
+      target=$(echo "$replicas" | cut -d/ -f2)
+      if [ "$current" != "$target" ]; then
+        # actively replicating service
+        service_done=0
+        state="replicating $replicas"
+      fi
+    fi
+    service_state "$service" "$state"
+
+    # check for states that indicate an update is done
+    if [ "$service_done" = "1" ]; then
+      case "$state" in
+        new|completed|rollback_completed)
+          service_done=1
+          ;;
+        *)
+          # any other state is unknown, not necessarily finished
+          service_done=0
+          ;;
+      esac
+    fi
+
+    # update stack done state
+    if [ "$service_done" = "2" ]; then
+      # error condition
+      stack_done=2
+    elif [ "$service_done" = "0" -a "$stack_done" = "1" ]; then
+      # only go to an updating state if not in an error state
+      stack_done=0
+    fi
+  done
+  if [ "$stack_done" = "2" ]; then
+    echo "Error: This deployment will not complete"
+    exit 1
+  fi
+  if [ "$stack_done" != "1" ]; then
+    check_timeout
+    sleep "${opt_s}"
+  fi
+done
diff --git a/gitlab_test.sh b/testing/gitlab_test.sh
old mode 100644
new mode 100755
similarity index 54%
rename from gitlab_test.sh
rename to testing/gitlab_test.sh
index d8a3d5d2b0ea173b315dcad71284a22947a5f992..348a28c1fe0c8b398e40c27016d2828672db8ec2
--- a/gitlab_test.sh
+++ b/testing/gitlab_test.sh
@@ -1,15 +1,37 @@
 #!/bin/sh
-chmod +x env_setup.sh wait_for_container.sh
-./env_setup.sh
+
+# fetch secrets and write them to their according files
+cat $vhr18_db > ../env/vhr18_db.env
+cat $vhr18_django > ../env/vhr18_django.env
+cat $vhr18_obs > ../env/vhr18_obs.env
+
+cat $emg_db > ../env/emg_db.env
+cat $emg_django > ../env/emg_django.env
+cat $emg_obs > ../env/emg_obs.env
+
+
+# use `pvs_testing` bucket instead
+
+sed -i -e 's/emg-data/pvs_testing/g' ../env/emg.env
+sed -i -e 's/vhr18-data/pvs_testing/g' ../env/vhr18.env
+
+sed -i -e 's/emg-cache/pvs_testing/g' ../env/emg_obs.env
+sed -i -e 's/vhr18-cache/pvs_testing/g' ../env/vhr18_obs.env
+
 mkdir data
 docker swarm init
 docker network create -d overlay emg-extnet
-docker stack deploy -c docker-compose.emg.yml -c docker-compose.emg.dev.yml emg-pvs
+docker stack deploy -c ../docker-compose.emg.yml -c ../docker-compose.emg.dev.yml emg-pvs
 apk update && apk add bash postgresql-dev gcc python3-dev musl-dev py-pip gdal
 pip3 install -r requirements.txt
-./wait_for_container.sh
+
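+# block until the deployed services have converged (see docker-stack-wait.sh)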
+./docker-stack-wait.sh -n renderer -n registrar -n preprocessor emg-pvs
+
+docker service ls
+
 bash ./registrar_test.sh product_list.csv
 
 # docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /var/www/pvs/dev/pvs_instance/manage.py storage create pvs_testing pvs_testing --type swift --storage-auth auth-cloud-ovh
 # docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /core/registrar.py --objects-prefix "OA/PH1B/0.1/b9/urn:eop:PHR:MULTISPECTRAL_0.5m:DS_PHR1B_201608070959189_FR1_PX_E012N32_0719_00974_4148/0000/PH1B_PHR_FUS_1A_20160807T095918_20160807T095920_TOU_1234_4148.DIMA.tar"
-# pytest -s registrar_test.py --name OA/PH1B/0.1/b9/urn:eop:PHR:MULTISPECTRAL_0.5m:DS_PHR1B_201608070959189_FR1_PX_E012N32_0719_00974_4148/0000/PH1B_PHR_FUS_1A_20160807T095918_20160807T095920_TOU_1234_4148.DIMA.tar
\ No newline at end of file
+# pytest -s registrar_test.py --name OA/PH1B/0.1/b9/urn:eop:PHR:MULTISPECTRAL_0.5m:DS_PHR1B_201608070959189_FR1_PX_E012N32_0719_00974_4148/0000/PH1B_PHR_FUS_1A_20160807T095918_20160807T095920_TOU_1234_4148.DIMA.tar
diff --git a/product_list.csv b/testing/product_list.csv
similarity index 100%
rename from product_list.csv
rename to testing/product_list.csv
diff --git a/testing/registrar_test.py b/testing/registrar_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..48436d4ea82b26adee54de22afd6580ca2a51bb5
--- /dev/null
+++ b/testing/registrar_test.py
@@ -0,0 +1,45 @@
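+# Integration test: verifies that every product listed in product_list.csv
+# has been registered as an EO object in the EMG database.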
+import os
+import csv
+
+import pytest
+import psycopg2
+from dotenv import load_dotenv
+
+
+@pytest.fixture(scope="session")
+def connection():
+    load_dotenv(dotenv_path='../env/emg_db.env')
+
+    connect_args = dict(
+        dbname=os.environ['DB_NAME'],
+        user=os.environ['DB_USER'],
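+        # presumably re-adds the quotes that python-dotenv strips (docker's
+        # env_file keeps them, so the actual password includes the quotes)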
+        password=f"\"{os.environ['DB_PW']}\"",
+        host='docker',
+        port=os.environ['DB_PORT'],
+    )
+
+    with psycopg2.connect(**connect_args) as connection:
+        yield connection
+
+
+@pytest.fixture
+def identifiers():
+    with open('./product_list.csv') as f:
+        yield csv.reader(f)
+
+
+def query_eo_object(connection, eo_id):
+    query = f"SELECT identifier FROM coverages_eoobject WHERE identifier = '{eo_id}';"
+    with connection.cursor() as cursor:
+        cursor.execute(query)
+        return cursor.fetchone()[0]
+
+
+def test_db_name(connection, identifiers):
+    for row in identifiers:
+        identifier = row[0].split('/')[4]
+        assert query_eo_object(connection, identifier) == identifier
diff --git a/registrar_test.sh b/testing/registrar_test.sh
similarity index 76%
rename from registrar_test.sh
rename to testing/registrar_test.sh
index d394a2d1de2fe73b2157adab0136e08c2d2c7708..2a7696c10d22be88b4a6460099a01a4484f6b635 100755
--- a/registrar_test.sh
+++ b/testing/registrar_test.sh
@@ -2,9 +2,8 @@
 product_list_file=$1
 docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /var/www/pvs/dev/pvs_instance/manage.py storage create pvs_testing pvs_testing --type swift --storage-auth auth-cloud-ovh
 IFS=","
-while read  product; do
+while read product; do
     docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /registrar.py --objects-prefix $product <<<$product
-    
-done < $product_list_file
+done < "$product_list_file"
 
-pytest -s registrar_test.py --name $product_list_file
+pytest #-s registrar_test.py --name $product_list_file
diff --git a/requirements.txt b/testing/requirements.txt
similarity index 81%
rename from requirements.txt
rename to testing/requirements.txt
index 56ba698664ddb78df20e57a7c6159d1da82feba9..43d82cd782b2b2d1884b394390eaf0abab8aaeac 100644
--- a/requirements.txt
+++ b/testing/requirements.txt
@@ -1,4 +1,5 @@
 pytest
 psycopg2
+python-dotenv
 # python-swiftclient
 # python-keystoneclient
\ No newline at end of file
diff --git a/wait_for_container.sh b/wait_for_container.sh
deleted file mode 100755
index 8a6b5f97f19a839f79fc748e98fdc6c48c0bef61..0000000000000000000000000000000000000000
--- a/wait_for_container.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-while [ -z $(docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /var/www/pvs/dev/pvs_instance/manage.py id list) ]; do
-  >&2 echo "Collection is not created yet - sleeping"
-    sleep 20
-  done
-
-while [ -z $(docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /var/www/pvs/dev/pvs_instance/manage.py id list) ]; do
-  >&2 echo "Collection is not created yet - sleeping"
-    sleep 20
-  done