diff --git a/.bumpversion.cfg b/.bumpversion.cfg new file mode 100644 index 0000000000000000000000000000000000000000..9485b6181ffcd02d23d37df6a93eba3294369192 --- /dev/null +++ b/.bumpversion.cfg @@ -0,0 +1,37 @@ +[bumpversion] +current_version = 1.0.0-rc.2 +commit = True +tag = True +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)\.(?P\d+))? +serialize = + {major}.{minor}.{patch}-{release}.{build} + {major}.{minor}.{patch} +tag_name = release-{new_version} + +[bumpversion:part:release] +optional_value = final +first_value = alpha +values = + alpha + beta + rc + final + +[bumpversion:file:.bumpversion.cfg] +search = current_version = {current_version} + +[bumpversion:glob:**/Dockerfile] +search = version="{current_version}" +replace = version="{new_version}" + +[bumpversion:glob:preprocessor/setup.py] +search = version="{current_version}" +replace = version="{new_version}" + +[bumpversion:glob:docker-compose*ops.yml] +search = :release-{current_version} +replace = :release-{new_version} + +[bumpversion:glob:config/*ops.html] +search = release-{current_version} +replace = release-{new_version} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index de2ece8a34372a189edb70a8e79aef700e7a5c24..74dd9a1c64a66e9d8422aa440f720b0c63196a39 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,83 +4,130 @@ variables: stages: - build - -build-master: - image: docker:latest +build-tag: + image: registry.gitlab.eox.at/esa/prism/vs/docker-base-testing:latest stage: build services: - - docker:dind + - docker:19.03.13-dind before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY script: - - VERSION_1=`grep 'version="*"' core/Dockerfile | cut -d '"' -f2` - IMAGE_1="$CI_REGISTRY_IMAGE/pvs_core" - docker pull "$IMAGE_1":latest || true - - docker build --cache-from "$IMAGE_1":latest -t "$IMAGE_1":dev -t "$IMAGE_1":$VERSION_1 core/ - - VERSION_2=`grep 'version="*"' preprocessor/Dockerfile | cut -d '"' -f2` + - docker build --cache-from "$IMAGE_1":latest -t "$IMAGE_1":dev -t "$IMAGE_1":$CI_COMMIT_TAG core/ - IMAGE_2="$CI_REGISTRY_IMAGE/pvs_preprocessor" - docker pull "$IMAGE_2":latest || true - - docker build --cache-from "$IMAGE_2":latest -t "$IMAGE_2":dev -t "$IMAGE_2":$VERSION_2 preprocessor/ - - VERSION_3=`grep 'version="*"' client/Dockerfile | cut -d '"' -f2` + - docker build --cache-from "$IMAGE_2":latest -t "$IMAGE_2":dev -t "$IMAGE_2":$CI_COMMIT_TAG preprocessor/ - IMAGE_3="$CI_REGISTRY_IMAGE/pvs_client" - docker pull "$IMAGE_3":latest || true - - docker build --cache-from "$IMAGE_3":latest -t "$IMAGE_3":dev -t "$IMAGE_3":$VERSION_3 client/ - - VERSION_4=`grep 'version="*"' cache/Dockerfile | cut -d '"' -f2` + - docker build --cache-from "$IMAGE_3":latest -t "$IMAGE_3":dev -t "$IMAGE_3":$CI_COMMIT_TAG client/ - IMAGE_4="$CI_REGISTRY_IMAGE/pvs_cache" - docker pull "$IMAGE_4":latest || true - - docker build --cache-from "$IMAGE_4":latest -t "$IMAGE_4":dev -t "$IMAGE_4":$VERSION_4 cache/ - - VERSION_5=`grep 'version="*"' fluentd/Dockerfile | cut -d '"' -f2` + - docker build --cache-from "$IMAGE_4":latest -t "$IMAGE_4":dev -t "$IMAGE_4":$CI_COMMIT_TAG cache/ - IMAGE_5="$CI_REGISTRY_IMAGE/fluentd" - docker pull "$IMAGE_5":latest || true - - docker build --cache-from "$IMAGE_5":latest -t "$IMAGE_5":dev -t "$IMAGE_5":$VERSION_5 fluentd/ - - VERSION_6=`grep 'version="*"' ingestor/Dockerfile | cut -d '"' -f2` + - docker build --cache-from "$IMAGE_5":latest -t "$IMAGE_5":dev -t "$IMAGE_5":$CI_COMMIT_TAG fluentd/ - IMAGE_6="$CI_REGISTRY_IMAGE/pvs_ingestor" - docker pull 
"$IMAGE_6":latest || true - - docker build --cache-from "$IMAGE_6":latest -t "$IMAGE_6":dev -t "$IMAGE_6":$VERSION_6 ingestor/ + - docker build --cache-from "$IMAGE_6":latest -t "$IMAGE_6":dev -t "$IMAGE_6":$CI_COMMIT_TAG ingestor/ + - cd ./testing && ./gitlab_test.sh + - if [ $? -ne 0 ]; then exit 1; fi # actually fail build + - docker push "$IMAGE_1":$CI_COMMIT_TAG + - docker push "$IMAGE_2":$CI_COMMIT_TAG + - docker push "$IMAGE_3":$CI_COMMIT_TAG + - docker push "$IMAGE_4":$CI_COMMIT_TAG + - docker push "$IMAGE_5":$CI_COMMIT_TAG + - docker push "$IMAGE_6":$CI_COMMIT_TAG + only: + - tags + artifacts: + paths: + - /tmp/*.log +build-master-staging: + image: registry.gitlab.eox.at/esa/prism/vs/docker-base-testing:latest + stage: build + services: + - docker:19.03.13-dind + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + script: + - if [[ "$CI_COMMIT_BRANCH" = "master" ]] ; then TAG_USED="latest"; else TAG_USED="staging"; fi + - IMAGE_1="$CI_REGISTRY_IMAGE/pvs_core" + - docker pull "$IMAGE_1":"$TAG_USED" || true + - docker build --cache-from "$IMAGE_1":"$TAG_USED" -t "$IMAGE_1":dev -t "$IMAGE_1":"$TAG_USED" core/ + - IMAGE_2="$CI_REGISTRY_IMAGE/pvs_preprocessor" + - docker pull "$IMAGE_2":"$TAG_USED" || true + - docker build --cache-from "$IMAGE_2":"$TAG_USED" -t "$IMAGE_2":dev -t "$IMAGE_2":"$TAG_USED" preprocessor/ + - IMAGE_3="$CI_REGISTRY_IMAGE/pvs_client" + - docker pull "$IMAGE_3":"$TAG_USED" || true + - docker build --cache-from "$IMAGE_3":"$TAG_USED" -t "$IMAGE_3":dev -t "$IMAGE_3":"$TAG_USED" client/ + - IMAGE_4="$CI_REGISTRY_IMAGE/pvs_cache" + - docker pull "$IMAGE_4":"$TAG_USED" || true + - docker build --cache-from "$IMAGE_4":"$TAG_USED" -t "$IMAGE_4":dev -t "$IMAGE_4":"$TAG_USED" cache/ + - IMAGE_5="$CI_REGISTRY_IMAGE/fluentd" + - docker pull "$IMAGE_6":"$TAG_USED" || true + - docker build --cache-from "$IMAGE_5":"$TAG_USED" -t "$IMAGE_5":dev -t "$IMAGE_5":"$TAG_USED" fluentd/ + - IMAGE_6="$CI_REGISTRY_IMAGE/pvs_ingestor" + - docker pull "$IMAGE_6":"$TAG_USED" || true + - docker build --cache-from "$IMAGE_6":"$TAG_USED" -t "$IMAGE_6":dev -t "$IMAGE_6":"$TAG_USED" ingestor/ - cd ./testing && ./gitlab_test.sh - if [ $? 
-ne 0 ]; then exit 1; fi # actually fail build - - docker push "$IMAGE_1":$VERSION_1 - - docker push "$IMAGE_1":latest - - docker push "$IMAGE_2":$VERSION_2 - - docker push "$IMAGE_2":latest - - docker push "$IMAGE_3":$VERSION_3 - - docker push "$IMAGE_3":latest - - docker push "$IMAGE_4":$VERSION_4 - - docker push "$IMAGE_4":latest - - docker push "$IMAGE_5":$VERSION_5 - - docker push "$IMAGE_5":latest - - docker push "$IMAGE_6":$VERSION_6 - - docker push "$IMAGE_6":latest + - docker push "$IMAGE_1":"$TAG_USED" + - docker push "$IMAGE_2":"$TAG_USED" + - docker push "$IMAGE_3":"$TAG_USED" + - docker push "$IMAGE_4":"$TAG_USED" + - docker push "$IMAGE_5":"$TAG_USED" + - docker push "$IMAGE_6":"$TAG_USED" only: + - staging - master + except: - tags + artifacts: + paths: + - /tmp/*.log build: - image: docker:latest + image: registry.gitlab.eox.at/esa/prism/vs/docker-base-testing:latest stage: build services: - - docker:dind + - docker:19.03.13-dind before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY script: - IMAGE="$CI_REGISTRY_IMAGE/pvs_core" - - docker pull "$IMAGE":latest || true - - docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev core/ + - docker pull "$IMAGE":staging || true + - docker build --cache-from "$IMAGE":staging -t "$IMAGE":dev core/ - IMAGE="$CI_REGISTRY_IMAGE/pvs_preprocessor" - - docker pull "$IMAGE":latest || true - - docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev preprocessor/ + - docker pull "$IMAGE":staging || true + - docker build --cache-from "$IMAGE":staging -t "$IMAGE":dev preprocessor/ - IMAGE="$CI_REGISTRY_IMAGE/pvs_client" - - docker pull "$IMAGE":latest || true - - docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev client/ + - docker pull "$IMAGE":staging || true + - docker build --cache-from "$IMAGE":staging -t "$IMAGE":dev client/ - IMAGE="$CI_REGISTRY_IMAGE/pvs_cache" - - docker pull "$IMAGE":latest || true - - docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev cache/ + - docker pull "$IMAGE":staging || true + - docker build --cache-from "$IMAGE":staging -t "$IMAGE":dev cache/ - IMAGE="$CI_REGISTRY_IMAGE/fluentd" - - docker pull "$IMAGE":latest || true - - docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev fluentd/ + - docker pull "$IMAGE":staging || true + - docker build --cache-from "$IMAGE":staging -t "$IMAGE":dev fluentd/ - IMAGE="$CI_REGISTRY_IMAGE/pvs_ingestor" - - docker pull "$IMAGE":latest || true - - docker build --cache-from "$IMAGE":latest -t "$IMAGE":dev ingestor/ + - docker pull "$IMAGE":staging || true + - docker build --cache-from "$IMAGE":staging -t "$IMAGE":dev ingestor/ - cd ./testing && ./gitlab_test.sh - if [ $? 
-ne 0 ]; then exit 1; fi # actually fail build except: + - tags + - staging - master + artifacts: + paths: + # - /tmp/emg-pvs_cache + # - /tmp/emg-pvs_client + - /tmp/emg-pvs_database + # - /tmp/emg-pvs_fluentd + # - /tmp/emg-pvs_ingestor + - /tmp/emg-pvs_preprocessor + # - /tmp/emg-pvs_redis + - /tmp/emg-pvs_registrar + - /tmp/emg-pvs_renderer + # - /tmp/emg-pvs_seeder + # - /tmp/emg-pvs_sftp diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..451c3f0cb1fe37f00a5793799a949877bc09e9aa --- /dev/null +++ b/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2019-2020 EOX IT Services GmbH + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md index fff3c4968fbd172317d9a8ee5f13f16fdb97adf4..d7870362abaf0c7fe73322e26fcbdd9cc49deb62 100644 --- a/README.md +++ b/README.md @@ -53,6 +53,13 @@ The following services are defined via docker compose files. * provides the endpoint for external access * configured via docker labels +### shibauth + +* based on the external unicon/shibboleth-sp:3.0.4 Apache + Shibboleth SP3 image +* provides authentication and authorization via SAML2 +* docker configuration files set access control rules +* traefik labels determine which services are protected via Shib + ### database * based on external postgis:10 image @@ -173,7 +180,7 @@ The following services are defined via docker compose files. * uses external atmoz/sftp image * provides sftp access to two volumes for report exchange on registration result xmls and ingest requirement xmls -* accessible on swarm master on port 2222 +* accessible on swarm master on port 2222-22xx * credentials supplied via config # Usage @@ -217,7 +224,7 @@ printf "" | docker secret create DJANGO_PASSWORD - printf "" | docker secret create OS_PASSWORD - # configs creation -printf ":::" | docker config create sftp-users - +printf ":::" | docker config create sftp_users_ - # for production base stack deployment, additional basic authentication credentials list need to be created # format of such a list used by traefik are username:hashedpassword (MD5, SHA1, BCrypt) sudo apt-get install apache2-utils @@ -226,6 +233,18 @@ docker secret create BASIC_AUTH_USERS_AUTH auth_list.txt docker secret create BASIC_AUTH_USERS_APIAUTH auth_list_api.txt ``` +In case **shibauth** service will be used, for production deployment, two more secrets need to be created for each stack, where **shibauth** is deployed. These ensure that the SP is recognized and its identity confirmed by the IDP. 
They are configured as **stack-name-capitalized_SHIB_KEY** and **stack-name-capitalized_SHIB_CERT**. In order to create them, use the attached **keygen.sh** command-line tool in the */config* folder. +```bash +SPURL="https://emg.pass.copernicus.eu" # service initial access point made accessible by traefik +./config/keygen.sh -h $SPURL -y 20 -e https://$SPURL/shibboleth -n sp-signing -f +docker secret create EMG_SHIB_CERT sp-signing-cert.pem +docker secret create EMG_SHIB_KEY sp-signing-key.pem +``` +Additionally, a docker config `idp_metadata` containing the metadata of the IDP in use needs to be added: +```bash +docker config create idp_metadata idp-metadata-received.xml +``` + Deploy the stack in dev environment: ``` docker stack deploy -c docker-compose.vhr18.yml -c docker-compose.vhr18.dev.yml -c docker-compose.logging.yml -c docker-compose.logging.dev.yml vhr18-pvs # start VHR_IMAGE_2018 stack in dev mode, for example to use local sources @@ -234,7 +253,7 @@ docker stack deploy -c docker-compose.emg.yml -c docker-compose.emg.dev.yml -c d Deploy base & logging stack in production environment: ``` docker stack deploy -c docker-compose.base.ops.yml base-pvs -docker stack deploy -c docker-compose.logging.yml docker-compose.logging.ops.yml logging +docker stack deploy -c docker-compose.logging.yml -c docker-compose.logging.ops.yml logging ``` Deploy the stack in production environment: Please note that in order to reuse existing database volumes, the stack name needs to be the same. Here we use `vhr18-pvs` but in operational service `vhr18-pdas` is used. @@ -285,7 +304,7 @@ On production machine, `fluentd` is set as a logging driver for docker daemon by The `SFTP` image allow remote access into 2 logging folders, you can define (edit/add) users, passwords and (UID/GID) using `docker config create` mentioned above. -In the below example the username is `eox`, once the stack is deployed you can sftp into the logging folders through port 2222 on -if you are running the dev stack- localhost : +In the below example the username is `eox`; once the stack is deployed, you can sftp into the logging folders through port 2222 for ``vhr18`` (``emg`` and ``dem`` use ports 2223 and 2224 respectively) on localhost if you are running the dev stack: ```bash sftp -P 2222 eox@127.0.0.1 @@ -294,7 +313,6 @@ You will log in into`/home/eox/data` directory which contains the 2 logging dir **NOTE:** The mounted directory that you are directed into is *`/home/user`*, where `user` is the username, hence when setting / editing the username in configs, the `sftp` mounted volumes path in `docker-compose..yml` must change respectively. - # Documentation ## Installation @@ -323,12 +341,27 @@ The documentation is generated in the respective *_build/html* directory. # Create software releases +## Release a new VS version + +We use [bump2version](https://github.com/c4urself/bump2version) to increment versions of individual docker images and create git tags. Pushing a tag triggers the CI `docker push` action for the versioned images. bump2version also updates the image versions used in the `.ops` docker compose files. + +Pushing to the `master` branch updates the `latest` images, while pushing to the `staging` branch updates the `staging` images. +For **versions** in general, we use semantic versioning with the format {major}.{minor}.{patch}-{release}.{build}. +First check the deployed staging version on the staging platform (TBD); if no problems are found, proceed. +The following operations should be performed on the `staging` or `master` branch.
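As an illustrative sketch of such a bump (assuming the `current_version = 1.0.0-rc.2` and the part names `major`, `minor`, `patch`, `release` and `build` defined in `.bumpversion.cfg` above; the resulting version numbers are derived from that configuration, not taken from an actual run):

```bash
# bump the release-candidate counter: 1.0.0-rc.2 -> 1.0.0-rc.3
bump2version build

# or promote the release part to its optional "final" value: 1.0.0-rc.2 -> 1.0.0
bump2version release

# commit = True and tag = True in .bumpversion.cfg create the commit and the
# release-{new_version} tag automatically; pushing the tag triggers the
# "build-tag" CI job, which pushes the versioned images
git push && git push --tags
```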
+``` +bump2version +git push +git push --tags +``` +If it was done on `staging` branch, then it should be merged to `master`, unless only a patch to previous major versions is made. +A hotfix to production is developed in a branch initiated from master, then merged to staging for verification. It is then merged to master for release. ## Source code release Create a TAR from source code: ```bash -git archive --prefix release-1.0.0.rc.1/ -o release-1.0.0.rc.1.tar.gz -9 master +git archive --prefix release-1.0.0/ -o release-1.0.0.tar.gz -9 master ``` Save Docker images: diff --git a/cache/Dockerfile b/cache/Dockerfile index 4653a2a727929a0851551864e3300af66a29ed45..57abc9760abf48796ffd709995e49f75bec4a02b 100644 --- a/cache/Dockerfile +++ b/cache/Dockerfile @@ -31,7 +31,7 @@ LABEL name="prism view server cache" \ vendor="EOX IT Services GmbH " \ license="MIT Copyright (C) 2019 EOX IT Services GmbH " \ type="prism view server cache" \ - version="0.0.1-dev" + version="1.0.0-rc.2" USER root ADD install.sh \ diff --git a/chart/.helmignore b/chart/.helmignore new file mode 100644 index 0000000000000000000000000000000000000000..0e8a0eb36f4ca2c939201c0d54b5d82a1ea34778 --- /dev/null +++ b/chart/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/chart/Chart.lock b/chart/Chart.lock new file mode 100644 index 0000000000000000000000000000000000000000..cd2ee36b88c0db94af50837d331f5ad23abecc41 --- /dev/null +++ b/chart/Chart.lock @@ -0,0 +1,9 @@ +dependencies: +- name: postgresql + repository: https://charts.bitnami.com/bitnami + version: 9.7.2 +- name: redis + repository: https://charts.bitnami.com/bitnami + version: 10.9.0 +digest: sha256:19a4b34c5ee40a44b18979a052b53a54569b69bd8b37e6f805826482dc6432ea +generated: "2020-11-09T16:57:52.383912+01:00" diff --git a/chart/Chart.yaml b/chart/Chart.yaml new file mode 100644 index 0000000000000000000000000000000000000000..90fc7b43e4e3d8a8bbaf577a160dd5da725402b5 --- /dev/null +++ b/chart/Chart.yaml @@ -0,0 +1,28 @@ +apiVersion: v2 +name: vs +description: A Helm chart for Kubernetes of the View Server (VS) + +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0-beta.1 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. +appVersion: 1.0.0-beta.1 + +maintainers: + - name: EOX IT Services GmbH + url: https://eox.at + +dependencies: + - name: "postgresql" + version: "9.7.2" + repository: "https://charts.bitnami.com/bitnami" + alias: database + - name: "redis" + version: "10.9.0" + repository: "https://charts.bitnami.com/bitnami" + alias: redis diff --git a/chart/README.md b/chart/README.md new file mode 100644 index 0000000000000000000000000000000000000000..acc5e4533912b96530d015174f4b1566a5815fbe --- /dev/null +++ b/chart/README.md @@ -0,0 +1,10 @@ +Chart for the View Server (VS) bundling all services + +Useful commands: + +```bash +helm dependency update + +helm template testing . 
--output-dir ../tmp/ -f values.yaml + +``` diff --git a/chart/charts/postgresql-9.7.2.tgz b/chart/charts/postgresql-9.7.2.tgz new file mode 100644 index 0000000000000000000000000000000000000000..6189f2c85ae6627787637a101759f2ce6acefca7 Binary files /dev/null and b/chart/charts/postgresql-9.7.2.tgz differ diff --git a/chart/charts/redis-10.9.0.tgz b/chart/charts/redis-10.9.0.tgz new file mode 100644 index 0000000000000000000000000000000000000000..9bd7302ed264e66fd07a668e3fb42df646e79265 Binary files /dev/null and b/chart/charts/redis-10.9.0.tgz differ diff --git a/chart/files/index.html b/chart/files/index.html new file mode 100644 index 0000000000000000000000000000000000000000..b74ad39fceb75c740a2aa076b90b5afaf62a7c37 --- /dev/null +++ b/chart/files/index.html @@ -0,0 +1,331 @@ + + + + + +PRISM View Server + + + + + +
+ + + + diff --git a/chart/files/init-db.sh b/chart/files/init-db.sh new file mode 100644 index 0000000000000000000000000000000000000000..7ef87270accebeb3a13f83f49f3511fa845e9a2f --- /dev/null +++ b/chart/files/init-db.sh @@ -0,0 +1,170 @@ +{{/* + Template to create the invocation of a browse type from a given + product type name, browse type name and browse type definition + + Expects '.' to be a dictionary in the following shape: + + { + product_type_name: "", + browse_type_name: "", // or nil + browse_type: { + // either + grey: { + expression: "", + range: [low, high], // optional + nodata: nodatavalue, // optional + } + // or + red: { + expression: "", + range: [low, high], // optional + nodata: nodatavalue, // optional + }, + green: { + expression: "", + range: [low, high], // optional + nodata: nodatavalue, // optional + }, + blue: { + expression: "", + range: [low, high], // optional + nodata: nodatavalue, // optional + }, + // optionally: + alpha: { + expression: "", + range: [low, high], // optional + nodata: nodatavalue, // optional + } + } + } +*/}} +{{- define "browsetype.cli" }} + python3 manage.py browsetype create "{{ .product_type_name }}" {{ if .browse_type_name }} "{{ .browse_type_name }}" {{- end }} \ + {{- if hasKey .browse_type "grey" }} + --grey {{ .browse_type.grey.expression | quote }} \ + {{- if hasKey .browse_type.grey "range" }} + --grey-range {{ range .browse_type.grey.range }}{{ . }} {{ end }}\ + {{- end }} + {{- if hasKey .browse_type.grey "nodata" }} + --grey-nodata {{ .browse_type.grey.nodata }} \ + {{- end }} + {{- else if and (.browse_type.red) (and .browse_type.green .browse_type.blue) }} + --red {{ .browse_type.red.expression | quote }} \ + --green {{ .browse_type.green.expression | quote }} \ + --blue {{ .browse_type.blue.expression | quote }} \ + {{- if hasKey .browse_type.red "range" }} + --red-range {{ range .browse_type.red.range }}{{ . }} {{ end }}\ + {{- end }} + {{- if hasKey .browse_type.green "range" }} + --green-range {{ range .browse_type.green.range }}{{ . }} {{ end }}\ + {{- end }} + {{- if hasKey .browse_type.blue "range" }} + --blue-range {{ range .browse_type.blue.range }}{{ . }} {{ end }}\ + {{- end }} + {{- if hasKey .browse_type.red "nodata" }} + --red-nodata {{ .browse_type.red.nodata }} \ + {{- end }} + {{- if hasKey .browse_type.green "nodata" }} + --green-nodata {{ .browse_type.green.nodata }} \ + {{- end }} + {{- if hasKey .browse_type.blue "nodata" }} + --blue-nodata {{ .browse_type.blue.nodata }} \ + {{- end }} + {{- if hasKey .browse_type "alpha" }} + --grey {{ .browse_type.alpha.expression | quote }} \ + {{- if hasKey .browse_type.alpha "range" }} + --alpha-range {{ range .browse_type.alpha.range }}{{ . 
}} {{ end }}\ + {{- end }} + {{- if hasKey .browse_type.alpha "nodata" }} + --alpha-nodata {{ .browse_type.alpha.nodata }} \ + {{- end }} + {{- end }} + {{- end }} + --traceback +{{- end -}} + +# Check if collection exits in database and initialize database only if not +if python3 manage.py id check {{ index (keys .Values.config.collections) 0 | quote }}; then + echo "Initialize database" + + python3 manage.py coveragetype import /rgbnir_definition.json \ + --traceback + + +# TODO: deleteme + + python3 manage.py coveragetype create S2L2A_B01 --field-type B01 B01 "Solar irradiance" "W/m2/um" 1913.57 + python3 manage.py coveragetype create S2L2A_B02 --field-type B02 B02 "Solar irradiance" "W/m2/um" 1941.63 + python3 manage.py coveragetype create S2L2A_B03 --field-type B03 B03 "Solar irradiance" "W/m2/um" 1822.61 + python3 manage.py coveragetype create S2L2A_B04 --field-type B04 B04 "Solar irradiance" "W/m2/um" 1512.79 + python3 manage.py coveragetype create S2L2A_B05 --field-type B05 B05 "Solar irradiance" "W/m2/um" 1425.56 + python3 manage.py coveragetype create S2L2A_B06 --field-type B06 B06 "Solar irradiance" "W/m2/um" 1288.32 + python3 manage.py coveragetype create S2L2A_B07 --field-type B07 B07 "Solar irradiance" "W/m2/um" 1163.19 + python3 manage.py coveragetype create S2L2A_B08 --field-type B08 B08 "Solar irradiance" "W/m2/um" 1036.39 + python3 manage.py coveragetype create S2L2A_B8A --field-type B8A B8A "Solar irradiance" "W/m2/um" 955.19 + python3 manage.py coveragetype create S2L2A_B09 --field-type B09 B09 "Solar irradiance" "W/m2/um" 813.04 + python3 manage.py coveragetype create S2L2A_B11 --field-type B11 B11 "Solar irradiance" "W/m2/um" 245.59 + python3 manage.py coveragetype create S2L2A_B12 --field-type B12 B12 "Solar irradiance" "W/m2/um" 85.25 + + + + + + + + + echo "Initializing collection {{ index (keys .Values.config.collections) 0 | squote }}." 
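For orientation, here is a hypothetical rendering of the `browsetype.cli` helper defined above: taking the commented-out `PL00` product type with its `TRUE_COLOR` browse from `values.yaml` as input, the template would expand to roughly the following management command (an illustrative sketch, not part of the generated script):

```bash
python3 manage.py browsetype create "PL00" "TRUE_COLOR" \
    --red "red" \
    --green "green" \
    --blue "blue" \
    --red-range 1000 15000 \
    --green-range 1000 15000 \
    --blue-range 1000 15000 \
    --red-nodata 0 \
    --green-nodata 0 \
    --blue-nodata 0 \
    --traceback
```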
+ + {{- range $product_type_name, $product_type := .Values.config.products.types | default dict }} + + # + # {{ $product_type_name }} + # + + # create the product type + python3 manage.py producttype create {{ $product_type_name | quote }} \ + {{ range $_, $coverage_type := $product_type.coverages }}--coverage-type {{ $coverage_type | quote }} \ + {{ end }} --traceback + + {{- if hasKey $product_type "default_browse" }} + {{- template "browsetype.cli" dict "product_type_name" $product_type_name "browse_type_name" nil "browse_type" (get $product_type.browses $product_type.default_browse) -}} + {{- end }} + + {{- range $browse_type_name, $browse_type := $product_type.browses }} + {{- template "browsetype.cli" dict "product_type_name" $product_type_name "browse_type_name" $browse_type_name "browse_type" $browse_type -}} + {{- end }} + + # create mask type + {{- range $mask_type_name, $mask_type := $product_type.masks }} + python3 manage.py masktype create {{ $product_type_name | quote }} {{ $mask_type_name | quote }} \ + {{ if $mask_type.validity -}} --validity \ {{- end }} --traceback + {{- end }} + {{- end }} {{/* range .Values.config.products.types */}} + + # set up collection type + {{- range $collection_name, $collection := .Values.config.collections }} + python3 manage.py collectiontype create "{{ $collection_name }}_type" \ + {{- range $coverage_type := $collection.coverage_types }} + --coverage-type {{ $coverage_type | quote }} \ + {{- end }} + {{- range $product_type_name := $collection.product_types }} + --product-type {{ $product_type_name | quote }} \ + {{- end }} + --traceback + + # Instantiate a collection for the collection itself and all levels + python3 manage.py collection create {{ $collection_name | quote }} \ + --type "{{ $collection_name }}_type" \ + --traceback + + {{- range $product_level := $collection.product_levels }} + python3 manage.py collection create "{{ $collection_name }}_{{ $product_level }}" \ + --type "{{ $collection_name }}_type" \ + --traceback + {{- end }} + {{- end }} {{/* range .collections */}} + +else + echo "Using existing database" +fi diff --git a/chart/files/mapcache.xml b/chart/files/mapcache.xml new file mode 100644 index 0000000000000000000000000000000000000000..c84abb6d4fb052ff60a5f63916b571cf3af59522 --- /dev/null +++ b/chart/files/mapcache.xml @@ -0,0 +1,131 @@ + + mixed + + fast + + + 75 + ycbcr + + + mypng + myjpeg + + {{- if .Values.config.cache.services.wms.enabled }} + + assemble + bilinear + mixed + 4096 + + {{- end }} + {{- if .Values.config.cache.services.wmts.enabled }} + + {{- end }} + {{- with .Values.config.cache.metadata }} + + {{ .title }} + {{ .abstract }} + {{ .url }} + {{ .keyword }} + {{ .accessconstraints }} + {{ .fees }} + {{ .contactname }} + {{ .contactphone }} + {{ .contactfacsimile }} + {{ .contactorganization }} + {{ .contactcity }} + {{ .contactstateorprovince }} + {{ .contactpostcode }} + {{ .contactcountry }} + {{ .contactelectronicmailaddress }} + {{ .contactposition }} + {{ .providername }} + {{ .providerurl }} + {{ .inspire_profile }} + {{ .inspire_metadataurl }} + {{ .defaultlanguage }} + {{ .language }} + + {{- end }} + empty_img + /tmp + true + + + {{- if eq .Values.config.objectStorage.cache.type "swift" }} + + {{ .Values.config.objectStorage.cache.auth_url_short }} + {{ .Values.config.objectStorage.cache.auth_version }} + {{ .Values.config.objectStorage.cache.tenant_id }} + {{ .Values.config.objectStorage.cache.username }} + {{ .Values.config.objectStorage.cache.password }} + {{ 
.Values.config.objectStorage.cache.container }} + {{ .Values.config.objectStorage.key | default "/{tileset}/{grid}/{dim}/{z}/{x}/{y}.{ext}" }} + + {{- else if eq .Values.config.objectStorage.cache.type "S3"}} # TODO + {{- else }} + + {{- end }} + + {{- define "mapcache-layerid" -}}{{ .collection_name }}{{ if .level_name }}__{{ .level_name }}{{ end }}{{ if .sub_layer_name }}__{{ .sub_layer_name }}{{ end }}{{- end }} + + {{- range $collection_name, $collection := .Values.config.collections -}} + + {{- $sub_types := list nil -}} + {{- range $product_type_name := $collection.product_types }} + {{- $sub_types := concat $sub_types (get (get $.Values.config.products.types $product_type_name | default dict) "browses" | default dict | keys) }} + {{- $sub_types := concat $sub_types (get (get $.Values.config.products.types $product_type_name | default dict) "masks" | default dict | keys) }} + {{- end -}} + + + {{- range $level_name := (concat (list nil) $collection.product_levels) }} + {{- range $sub_type_name := ($sub_types | uniq) }} + {{- $layer_id := (include "mapcache-layerid" (dict "collection_name" $collection_name "level_name" $level_name "sub_type_name" $sub_type_name)) | trim }} + + + + {{ $layer_id }} + true + {{ get (get $.Values.config.cache.tilesets $layer_id | default dict) "style" }} + + + + http://renderer/ows + {{ $.Values.config.cache.connection_timeout | default 10 }} + {{ $.Values.config.cache.timeout | default 120 }} + + + {{- end }} + {{- end }} + + + {{- range $level_name := (concat (list nil) $collection.product_levels) }} + {{- range $sub_type_name := ($sub_types | uniq) }} + {{- $layer_id := (include "mapcache-layerid" (dict "collection_name" $collection_name "level_name" $level_name "sub_type_name" $sub_type_name)) | trim }} + + + {{ (get $.Values.config.cache.tilesets $layer_id | default dict ).title }} + {{ (get $.Values.config.cache.tilesets $layer_id | default dict ).abstract }} + + {{ $layer_id }} + cache + WGS84 + mixed + 1 1 + {{ $.Values.config.cache.expires | default 3600 }} + + stack + false + false + + host={{ $.Values.config.database.host }} user={{ $.Values.config.database.user }} password={{ $.Values.config.database.password }} dbname={{ $.Values.config.database.name }} port={{ $.Values.config.database.port | default 5432 }} + SELECT to_char(MIN(mapcache_items.begin_time), 'YYYY-MM-DD"T"HH24:MI:SS"Z"') || '/' || to_char(MAX(mapcache_items.end_time), 'YYYY-MM-DD"T"HH24:MI:SS"Z"') FROM mapcache_items WHERE mapcache_items.collection = '{{ $layer_id }}'; + SELECT * FROM (SELECT to_char(mapcache_items.begin_time, 'YYYY-MM-DD"T"HH24:MI:SS"Z"') || '/' || to_char(mapcache_items.end_time, 'YYYY-MM-DD"T"HH24:MI:SS"Z"') AS "interval" FROM mapcache_items WHERE (mapcache_items.collection = '{{ $layer_id }}' AND ((mapcache_items."begin_time" < to_timestamp(:end_timestamp) AND mapcache_items."end_time" > to_timestamp(:start_timestamp)) or (mapcache_items."begin_time" = mapcache_items."end_time" AND mapcache_items."begin_time" <= to_timestamp(:end_timestamp) AND mapcache_items."end_time" >= to_timestamp(:start_timestamp)))) AND mapcache_items."footprint" && ST_MakeEnvelope(:minx, :miny, :maxx, :maxy, 4326) ORDER BY mapcache_items."end_time" DESC LIMIT 20) AS sub ORDER BY interval ASC; + + + + {{- end }} + {{- end }} + +{{- end -}} \ No newline at end of file diff --git a/chart/files/preprocessor-config.yaml b/chart/files/preprocessor-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5bdc957e4aed30e431d661267a13db155087a9a3 --- /dev/null +++ 
b/chart/files/preprocessor-config.yaml @@ -0,0 +1,43 @@ +source: + {{- with .Values.config.objectStorage.download }} + type: {{ .type }} + kwargs: + {{- if eq .type "swift" }} + username: {{ .username }} + password: {{ .password }} + tenant_name: {{ .tenant_name }} + tenant_id: {{ .tenant_id }} + region_name: {{ .region_name }} + auth_url: {{ .auth_url }} + auth_version: {{ .auth_version }} + user_domain_name: {{ .user_domain_name }} + {{- else if eq .type "s3" }} + bucket: {{ .bucket }} + endpoint_url: {{ .endpoint_url }} + access_key_id: {{ .access_key_id }} + secret_access_key: {{ .secret_access_key }} + region: {{ .region }} + {{- end }} + {{- end }} +target: + {{- with .Values.config.objectStorage.data }} + type: {{ .type }} + kwargs: + {{- if eq .type "swift" }} + username: {{ .username }} + password: {{ .password }} + tenant_name: {{ .tenant_name }} + tenant_id: {{ .tenant_id }} + region_name: {{ .region_name }} + auth_url: {{ .auth_url }} + auth_version: {{ .auth_version }} + user_domain_name: {{ .user_domain_name }} + {{- else if eq .type "s3" }} + bucket: {{ .bucket }} + endpoint_url: {{ .endpoint_url }} + access_key_id: {{ .access_key_id }} + secret_access_key: {{ .secret_access_key }} + region: {{ .region }} + {{- end }} + {{- end }} +{{ toYaml .Values.config.preprocessor | indent 2 }} diff --git a/chart/files/registrar-config.yaml b/chart/files/registrar-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..66a887eabf12d3a32125764029cb7670fb619804 --- /dev/null +++ b/chart/files/registrar-config.yaml @@ -0,0 +1,56 @@ +sources: + {{- with .Values.config.objectStorage.data }} + {{- $type := ( .type | lower ) }} + - type: {{ $type }} + name: name # TODO + kwargs: + {{- if eq $type "swift" }} + username: {{ .username }} + password: {{ .password }} + tenant_name: {{ .tenant_name }} + tenant_id: {{ .tenant_id }} + region_name: {{ .region_name }} + auth_url: {{ .auth_url }} + auth_version: {{ .auth_version }} + user_domain_name: {{ .user_domain_name }} + {{- else if eq $type "s3" }} + bucket_name: {{ .bucket | default "null" }} + endpoint_url: {{ .endpoint_url }} + access_key_id: {{ .access_key_id }} + secret_access_key: {{ .secret_access_key }} + # region: {{ .region }} + {{- end }} + {{- end }} + +schemes: + {{- range .Values.config.registrar.schemes | default list }} + - {{ toYaml . }} + {{- end }} + +backends: + - type: eoxserver + filter: + kwargs: + instance_base_path: /var/www/pvs/dev + instance_name: pvs_instance + + mapping: + {{- range $product_type_name, $product_type := .Values.config.products.types }} + {{ $product_type_name }}: + {{- range $level := list "Level_1" "Level_3" "Level-2A" }} + {{ $level }}: + product_type_name: {{ $product_type_name | quote }} + collections: + {{- range $collection_name, $collection_conf := $.Values.config.collections }} + {{- if and (has $product_type_name $collection_conf.product_types) (has $level $collection_conf.product_levels) }} + - {{ $collection_name }} + {{- end }} + {{- end }} + coverages: + {{- toYaml $product_type.coverages | nindent 16 }} + masks: + {{- range $mask_name, $_ := $product_type.masks }} + {{ $mask_name }}: {{ $mask_name }} + {{- end }} + {{- end }} + {{- end }} diff --git a/chart/templates/_helpers.tpl b/chart/templates/_helpers.tpl new file mode 100644 index 0000000000000000000000000000000000000000..64cea4aac060290ea742a2a95fe0371a87fb55ca --- /dev/null +++ b/chart/templates/_helpers.tpl @@ -0,0 +1,63 @@ +{{/* +Expand the name of the chart. 
+*/}} +{{- define "vs.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 53 chars leaving space for 10 additional chars because some +Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "vs.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 53 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 53 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 53 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "vs.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "vs.labels" -}} +helm.sh/chart: {{ include "vs.chart" . }} +{{ include "vs.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "vs.selectorLabels" -}} +app.kubernetes.io/name: {{ include "vs.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "vs.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "vs.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/chart/templates/cache-configmap.yaml b/chart/templates/cache-configmap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d72924e9c2aef9db31325844187e43e83613a32a --- /dev/null +++ b/chart/templates/cache-configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "vs.fullname" . }}-cache +data: + {{ (tpl (.Files.Glob "files/mapcache.xml").AsConfig . ) | nindent 2}} diff --git a/chart/templates/client-configmap.yaml b/chart/templates/client-configmap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c3cc92b7b9b81947b3794facbf24651f3d9234a1 --- /dev/null +++ b/chart/templates/client-configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "vs.fullname" . }}-client +data: + {{ (tpl (.Files.Glob "files/index.html").AsConfig . ) | nindent 2}} diff --git a/chart/templates/client-deployment.yaml b/chart/templates/client-deployment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ae66fa84a8155d70d9b7b7f53bfc2cce92b3b9da --- /dev/null +++ b/chart/templates/client-deployment.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "vs.fullname" . }}-client + labels: + {{- include "vs.labels" . | nindent 4 }} + app.kubernetes.io/service: client +spec: + replicas: {{ .Values.client.replicaCount }} + selector: + matchLabels: + {{- include "vs.selectorLabels" . | nindent 6 }} + app.kubernetes.io/service: client + strategy: + rollingUpdate: + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + annotations: + prometheus.io/scrape: "false" + labels: + {{- include "vs.selectorLabels" . 
| nindent 8 }} + app.kubernetes.io/service: client + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-client + image: 'registry.gitlab.eox.at/esa/prism/vs/pvs_client:{{ .Values.image.tag | default .Chart.AppVersion }}' + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + resources: + {{- toYaml .Values.client.resources | nindent 12 }} + volumeMounts: + - mountPath: /usr/share/nginx/html/index.html + name: client + subPath: index.html + {{- with .Values.client.affinity | default .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - configMap: + items: + - key: index.html + path: index.html + name: {{ include "vs.fullname" . }}-client + name: client diff --git a/chart/templates/client-service.yaml b/chart/templates/client-service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..587cba40fdc74e0625eb11aa07e49c181d45ba3c --- /dev/null +++ b/chart/templates/client-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "vs.fullname" . }}-client + labels: + {{- include "vs.labels" . | nindent 4 }} + app.kubernetes.io/service: client +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "vs.selectorLabels" . | nindent 4 }} + app.kubernetes.io/service: client diff --git a/chart/templates/ingress.yaml b/chart/templates/ingress.yaml new file mode 100644 index 0000000000000000000000000000000000000000..deb7100d730f5726b987dae2d1bef6dd6b076d27 --- /dev/null +++ b/chart/templates/ingress.yaml @@ -0,0 +1,54 @@ +{{- $fullName := include "vs.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "vs.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + nginx.ingress.kubernetes.io/rewrite-target: /$1 + {{- end }} +spec: + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + - path: /(ows.*) + backend: + serviceName: {{ $fullName }}-renderer + servicePort: http + - path: /(opensearch.*) + backend: + serviceName: {{ $fullName }}-renderer + servicePort: http + - path: /(admin.*) + backend: + serviceName: {{ $fullName }}-renderer + servicePort: http + - path: /(.*) + backend: + serviceName: {{ $fullName }}-client + servicePort: http + # - path: /cache/(.*) + # backend: + # serviceName: {{ $fullName }}-cache + # servicePort: http + {{- end }} diff --git a/chart/templates/init-db-configmap.yaml b/chart/templates/init-db-configmap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..01b1be69630d3a7f51eb4e7bd0d2ec198fa8f225 --- /dev/null +++ b/chart/templates/init-db-configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "vs.fullname" . 
}}-init-db +data: + {{ (tpl (.Files.Glob "files/init-db.sh").AsConfig . ) | nindent 2}} diff --git a/chart/templates/preprocessor-config-configmap.yaml b/chart/templates/preprocessor-config-configmap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f74f82022f1939e57c87a5f6b224cca8dc20a138 --- /dev/null +++ b/chart/templates/preprocessor-config-configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "vs.fullname" . }}-preprocessor-config +data: + {{ (tpl (.Files.Glob "files/preprocessor-config.yaml").AsConfig . ) | nindent 2 }} diff --git a/chart/templates/registrar-config-configmap.yaml b/chart/templates/registrar-config-configmap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1eaf7ab53e7a0b6c59e7bfb04fec1fa49b8b9f8e --- /dev/null +++ b/chart/templates/registrar-config-configmap.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "vs.fullname" . }}-registrar-config +data: + {{ (tpl (.Files.Glob "files/registrar-config.yaml").AsConfig . ) | nindent 2}} diff --git a/chart/templates/registrar-deployment.yaml b/chart/templates/registrar-deployment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1e4b43743d8fac7f6a9dcd1135798a2e183d0719 --- /dev/null +++ b/chart/templates/registrar-deployment.yaml @@ -0,0 +1,96 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "vs.fullname" . }}-registrar + labels: + {{- include "vs.labels" . | nindent 4 }} + app.kubernetes.io/service: registrar +spec: + replicas: {{ .Values.registrar.replicaCount }} + selector: + matchLabels: + {{- include "vs.selectorLabels" . | nindent 6 }} + app.kubernetes.io/service: registrar + strategy: + rollingUpdate: + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + annotations: + prometheus.io/scrape: "false" + labels: + {{- include "vs.selectorLabels" . | nindent 8 }} + app.kubernetes.io/service: registrar + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-registrar + image: "registry.gitlab.eox.at/esa/prism/vs/pvs_core:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + resources: + {{- toYaml .Values.registrar.resources | nindent 12 }} + args: + - /run-registrar.sh + env: + {{- range $key, $value := .Values.config.general }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.config.database }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + - name: DB_HOST + value: {{ .Release.Name }}-database + {{- range $key, $value := .Values.config.django }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.config.objectStorage.data }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.config.redis }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + - name: REDIS_HOST + value: {{ .Release.Name }}-redis-master + - name: INIT_SCRIPTS + value: /configure.sh /init-db.sh /initialized.sh + - name: INSTALL_DIR + value: /var/www/pvs/dev/ + - name: INSTANCE_ID + value: prism-view-server_registrar + - name: STARTUP_SCRIPTS + value: /wait-initialized.sh + - name: WAIT_SERVICES + value: {{ .Release.Name }}-database:{{ .Values.config.database.DB_PORT }} {{ .Release.Name }}-redis-master:{{ .Values.config.redis.REDIS_PORT }} + volumeMounts: + - mountPath: /init-db.sh + name: init-db + subPath: init-db.sh + - mountPath: /config.yaml + name: registrar-config + subPath: registrar-config.yaml + {{- with .Values.registrar.affinity | default .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - configMap: + items: + - key: init-db.sh + path: init-db.sh + name: {{ include "vs.fullname" . }}-init-db + name: init-db + - configMap: + items: + - key: registrar-config.yaml + path: registrar-config.yaml + name: {{ include "vs.fullname" . }}-registrar-config + name: registrar-config diff --git a/chart/templates/registrar-service.yaml b/chart/templates/registrar-service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6762a3b011af31b02b2db32cc4993f1afee17c61 --- /dev/null +++ b/chart/templates/registrar-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "vs.fullname" . }}-registrar + labels: + {{- include "vs.labels" . | nindent 4 }} + app.kubernetes.io/service: registrar +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "vs.selectorLabels" . | nindent 4 }} + app.kubernetes.io/service: registrar diff --git a/chart/templates/renderer-deployment.yaml b/chart/templates/renderer-deployment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0bcb7cd0ca2b45f04514af432408f22b6c3439a5 --- /dev/null +++ b/chart/templates/renderer-deployment.yaml @@ -0,0 +1,99 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "vs.fullname" . }}-renderer + labels: + {{- include "vs.labels" . | nindent 4 }} + app.kubernetes.io/service: renderer +spec: + replicas: {{ .Values.renderer.replicaCount }} + selector: + matchLabels: + {{- include "vs.selectorLabels" . 
| nindent 6 }} + app.kubernetes.io/service: renderer + strategy: + rollingUpdate: + maxUnavailable: 0 + type: RollingUpdate + template: + metadata: + annotations: + prometheus.io/scrape: "false" + labels: + {{- include "vs.selectorLabels" . | nindent 8 }} + app.kubernetes.io/service: renderer + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }}-renderer + image: "registry.gitlab.eox.at/esa/prism/vs/pvs_core:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: / + port: http + readinessProbe: + httpGet: + path: / + port: http + startupProbe: + httpGet: + path: / + port: http + failureThreshold: 30 + periodSeconds: 10 + resources: + {{- toYaml .Values.renderer.resources | nindent 12 }} + args: + - /run-httpd.sh + env: + {{- range $key, $value := .Values.config.general }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.config.database }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + - name: DB_HOST + value: {{ .Release.Name }}-database + {{- range $key, $value := .Values.config.django }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.config.objectStorage.data }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + - name: INIT_SCRIPTS + value: /configure.sh /init-db.sh /initialized.sh + - name: INSTALL_DIR + value: /var/www/pvs/dev/ + - name: INSTANCE_ID + value: prism-view-server_renderer + - name: STARTUP_SCRIPTS + value: /wait-initialized.sh + - name: WAIT_SERVICES + value: {{ .Release.Name }}-database:{{ .Values.config.database.DB_PORT }} + volumeMounts: + - mountPath: /init-db.sh + name: init-db + subPath: init-db.sh + {{- with .Values.renderer.affinity | default .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - configMap: + items: + - key: init-db.sh + path: init-db.sh + name: {{ include "vs.fullname" . }}-init-db + name: init-db diff --git a/chart/templates/renderer-service.yaml b/chart/templates/renderer-service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..93be22e38f4e8d98bf8255ce4c41505aee9de3b0 --- /dev/null +++ b/chart/templates/renderer-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "vs.fullname" . }}-renderer + labels: + {{- include "vs.labels" . | nindent 4 }} + app.kubernetes.io/service: renderer +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "vs.selectorLabels" . | nindent 4 }} + app.kubernetes.io/service: renderer diff --git a/chart/templates/tests/test-connection.yaml b/chart/templates/tests/test-connection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5ca4058fc45d388c350608ee0f6bee66e7be830b --- /dev/null +++ b/chart/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "vs.fullname" . }}-test-connection" + labels: + {{- include "vs.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "vs.fullname" . 
}}-renderer:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/chart/values.yaml b/chart/values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..51f042be10818c9419f3512f19d1f381e3d3808b --- /dev/null +++ b/chart/values.yaml @@ -0,0 +1,371 @@ +config: + general: + COLLECTION: COLLECTION + CPL_VSIL_CURL_ALLOWED_EXTENSIONS: .TIF,.tif,.xml + GDAL_DISABLE_READDIR_ON_OPEN: "TRUE" + COLLECT_STATIC: "false" + database: + DB_NAME: dbname + DB_PORT: "5432" + DB_PW: dbpw + DB_USER: dbuser + POSTGRES_DB: dbname + POSTGRES_PASSWORD: dbpw + POSTGRES_USER: dbuser + django: + DJANGO_MAIL: office@eox.at + DJANGO_PASSWORD: djangopw + DJANGO_USER: djangouser + objectStorage: + download: + type: swift + username: "username" + password: "password" + tenant_name: "tenant_name" + tenant_id: "tenant_id" + region_name: "region_name" + auth_url: "auth_url" + auth_url_short: "auth_url_short" + auth_version: "auth_version" + user_domain_name: "user_domain_name" + data: + type: swift + username: "username" + password: "password" + tenant_name: "tenant_name" + tenant_id: "tenant_id" + region_name: "region_name" + auth_url: "auth_url" + auth_url_short: "auth_url_short" + auth_version: "auth_version" + user_domain_name: "user_domain_name" + cache: + type: S3 + bucket: "bucket" + endpoint_url: "endpoint_url" + access_key_id: "access_key_id" + secret_access_key: "secret_access_key" + region: "region" + redis: + REDIS_PORT: "6379" + REDIS_PREPROCESS_QUEUE_KEY: preprocess_queue + REDIS_QUEUE_KEY: seed_queue + REDIS_REGISTER_QUEUE_KEY: register_queue + REDIS_REGISTERED_SET_KEY: registered_set + REDIS_SEED_QUEUE_KEY: seed_queue + REDIS_SET_KEY: registered_set + + client: + layers: {} + # VHR_IMAGE_2018_Level_1: + # display_color: '#eb3700' + # title: VHR Image 2018 Level 1 + # layer: VHR_IMAGE_2018_Level_1__TRUE_COLOR + # sub_layers: + # VHR_IMAGE_2018_Level_1__TRUE_COLOR: + # label: VHR Image 2018 True Color + # VHR_IMAGE_2018_Level_1__masked_validity: + # label: VHR Image 2018 True Color with masked validity + # VHR_IMAGE_2018_Level_1__FALSE_COLOR: + # label: VHR Image 2018 False Color + # VHR_IMAGE_2018_Level_1__NDVI: + # label: VHR Image 2018 NDVI + # VHR_IMAGE_2018_Level_3: + # display_color: '#eb3700' + # title: VHR Image 2018 Level 3 + # layer: VHR_IMAGE_2018_Level_3__TRUE_COLOR + # sub_layers: + # VHR_IMAGE_2018_Level_3__TRUE_COLOR: + # label: VHR Image 2018 True Color + # VHR_IMAGE_2018_Level_3__masked_validity: + # label: VHR Image 2018 True Color with masked validity + # VHR_IMAGE_2018_Level_3__FALSE_COLOR: + # label: VHR Image 2018 False Color + # VHR_IMAGE_2018_Level_3__NDVI: + # label: VHR Image 2018 NDVI + overlay_layers: {} + # VHR_IMAGE_2018_Level_3__outlines: + # display_color: '#187465' + # title: VHR Image 2018 Level 3 Outlines + # layer: VHR_IMAGE_2018_Level_3__outlines + # VHR_IMAGE_2018_Level_3__masked_validity__Full: + # display_color: '#187465' + # title: VHR Image 2018 Level 3 True Color with masked validity Full Coverage + # layer: VHR_IMAGE_2018_Level_3__masked_validity__Full + # VHR_IMAGE_2018_Level_3__Full: + # display_color: '#187465' + # title: VHR Image 2018 Level 3 True Color Full Coverage + # layer: VHR_IMAGE_2018_Level_3__Full + + # cache related options + cache: + metadata: + title: PRISM Data Access Service (PASS) developed by EOX + abstract: PRISM Data Access Service (PASS) developed by EOX + url: https://vhr18.pvs.prism.eox.at/cache/ows + keyword: view service + accessconstraints: UNKNOWN + fees: UNKNOWN + contactname: Stephan Meissl + contactphone: 
Please contact via mail. + contactfacsimile: None + contactorganization: EOX IT Services GmbH + contactcity: Vienna + contactstateorprovince: Vienna + contactpostcode: 1090 + contactcountry: Austria + contactelectronicmailaddress: office@eox.at + contactposition: CTO + providername: EOX + providerurl: https://eox.at + inspire_profile: true + inspire_metadataurl: TBD + defaultlanguage: eng + language: eng + services: + wms: + enabled: true + wmts: + enabled: true + connection_timeout: 10 + timeout: 120 + expires: 3600 + key: /{tileset}/{grid}/{dim}/{z}/{x}/{y}.{ext} + tilesets: {} + # VHR_IMAGE_2018: + # title: VHR Image 2018 True Color + # abstract: VHR Image 2018 True Color + # VHR_IMAGE_2018__TRUE_COLOR: + # title: VHR Image 2018 True Color + # abstract: VHR Image 2018 True Color + # VHR_IMAGE_2018__FALSE_COLOR: + # title: VHR Image 2018 False Color + # abstract: VHR Image 2018 False Color + # VHR_IMAGE_2018__NDVI: + # title: VHR Image 2018 NDVI + # abstract: VHR Image 2018 NDVI + # style: earth + # VHR_IMAGE_2018_Level_1__TRUE_COLOR: + # title: VHR Image 2018 Level 1 True Color + # abstract: VHR Image 2018 Level 1 True Color + # VHR_IMAGE_2018_Level_1__FALSE_COLOR: + # title: VHR Image 2018 Level 1 False Color + # abstract: VHR Image 2018 Level 1 False Color + # VHR_IMAGE_2018_Level_1__NDVI: + # title: VHR Image 2018 Level 1 NDVI + # abstract: VHR Image 2018 Level 1 NDVI + # style: earth + # VHR_IMAGE_2018_Level_1__TRUE_COLOR: + # title: VHR Image 2018 Level 3 True Color + # abstract: VHR Image 2018 Level 3 True Color + # VHR_IMAGE_2018_Level_1__FALSE_COLOR: + # title: VHR Image 2018 Level 3 False Color + # abstract: VHR Image 2018 Level 3 False Color + # VHR_IMAGE_2018_Level_1__NDVI: + # title: VHR Image 2018 Level 3 NDVI + # abstract: VHR Image 2018 Level 3 NDVI + # style: earth + + preprocessor: + metadata_glob: '*GSC*.xml' + type_extractor: + xpath: + - /gsc:report/gsc:opt_metadata/gml:using/eop:EarthObservationEquipment/eop:platform/eop:Platform/eop:shortName/text() + level_extractor: + # xpath can also be a list of xpaths to be tried one after another + xpath: substring-after(substring-after(/gsc:report/gsc:opt_metadata/gml:metaDataProperty/gsc:EarthObservationMetaData/eop:parentIdentifier/text(), '/'), '/') + preprocessing: + defaults: + move_files: true + data_file_globs: + - '*.tif' + - '*.jp2' + output: + options: + format: COG + dstSRS: 'EPSG:4326' + dstNodata: 0 + creationOptions: + - BLOCKSIZE=512 + - COMPRESS=DEFLATE + - NUM_THREADS=8 + - BIGTIFF=IF_SAFER + - OVERVIEWS=AUTO + types: {} + # PH1B: # just to pass validation + # nested: true + + registrar: + schemes: + - type: gsc + + # mapping of collection name to objects + collections: {} + # VHR_IMAGE_2018: + # product_types: + # - PL00 + # - DM02 + # - KS03 + # - KS04 + # - PH1A + # - PH1B + # - SP06 + # - SP07 + # - SW00 + # - TR00 + # product_levels: + # - Level_1 + # - Level_3 + # coverage_types: + # - RGBNir + + products: + type_extractor: + xpath: + - /gsc:report/gsc:opt_metadata/gml:using/eop:EarthObservationEquipment/eop:platform/eop:Platform/eop:shortName/text() + - /gsc:report/gsc:sar_metadata/gml:using/eop:EarthObservationEquipment/eop:platform/eop:Platform/eop:shortName/text() + namespace_map: + + level_extractor: + xpath: + namespace_map: + + types: {} + # PL00: + # coverages: + # PL00: RGBNir + # default_browse: TRUE_COLOR + # browses: + # TRUE_COLOR: + # red: + # expression: red + # range: [1000, 15000] + # nodata: 0 + # green: + # expression: green + # range: [1000, 15000] + # nodata: 0 + # blue: + # 
expression: blue + # range: [1000, 15000] + # nodata: 0 + # FALSE_COLOR: + # red: + # expression: nir + # range: [1000, 15000] + # nodata: 0 + # green: + # expression: red + # range: [1000, 15000] + # nodata: 0 + # blue: + # expression: green + # range: [1000, 15000] + # nodata: 0 + # NDVI: + # grey: + # expression: (nir-red)/(nir+red) + # range: [-1, 1] + # masks: + # validity: + # validity: true + + coverages: {} + # only RGBNir? SAR? complete list with all options here? + +database: + persistence: + enabled: false + # existingClaim: eoepca-rm-db-pvc + postgresqlUsername: dbuser + postgresqlPassword: dbpw + postgresqlDatabase: dbname + postgresqlPostgresPassword: dbpgpw + initdbScripts: + enablePostgis.sh: | + echo "Enabling postgis" + PGPASSWORD="$POSTGRES_POSTGRES_PASSWORD" psql -U postgres -d "${POSTGRES_DB}" -c "CREATE EXTENSION postgis;" + +redis: + usePassword: false + # persistence: + # existingClaim: redis + # master: + # persistence: + # enabled: true + cluster: + enabled: false + +preprocessor: + replicaCount: 1 + resources: + limits: + cpu: 1.5 + memory: 6Gi + requests: + cpu: 0.5 + memory: 0.5Gi + affinity: {} + +registrar: + replicaCount: 1 + resources: + limits: + cpu: 1.5 + memory: 6Gi + requests: + cpu: 0.5 + memory: 0.5Gi + affinity: {} + +renderer: + replicaCount: 1 + resources: + limits: + cpu: 1.5 + memory: 6Gi + requests: + cpu: 0.5 + memory: 0.5Gi + affinity: {} + +client: + replicaCount: 1 + resources: + limits: + cpu: 0.5 + memory: 1Gi + requests: + cpu: 0.1 + memory: 0.1Gi + + +replicaCount: 1 + +image: + repository: registry.gitlab.eox.at/esa/prism/vs + pullPolicy: IfNotPresent + tag: "registrar-modularization" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +service: + type: ClusterIP + port: 80 + +ingress: + annotations: + kubernetes.io/ingress.class: nginx + kubernetes.io/tls-acme: "true" + nginx.ingress.kubernetes.io/proxy-read-timeout: "600" + nginx.ingress.kubernetes.io/enable-cors: "true" + hosts: + - host: vs.local + tls: + - hosts: + - vs.local + secretName: vs-secret + +affinity: {} diff --git a/client/Dockerfile b/client/Dockerfile index 9da01c45e88e4e9e28b8854bae6f7554d9d9bf5e..2255e745416052b932c5af8c7b6e0375b76773d9 100644 --- a/client/Dockerfile +++ b/client/Dockerfile @@ -31,6 +31,6 @@ LABEL name="prism view server client" \ vendor="EOX IT Services GmbH " \ license="MIT Copyright (C) 2019 EOX IT Services GmbH " \ type="prism view server client" \ - version="0.0.1-dev" + version="1.0.0-rc.2" COPY html/ /usr/share/nginx/html/ diff --git a/client/html/View-Server_-_User-Guide_v1.1.0.pdf b/client/html/View-Server_-_User-Guide_v1.1.1.pdf similarity index 99% rename from client/html/View-Server_-_User-Guide_v1.1.0.pdf rename to client/html/View-Server_-_User-Guide_v1.1.1.pdf index 191ee76989bbb51cb75093ceb93b129a81f962c4..5da1ce74147ee0b8705a6610fa1e963c3452094e 100644 Binary files a/client/html/View-Server_-_User-Guide_v1.1.0.pdf and b/client/html/View-Server_-_User-Guide_v1.1.1.pdf differ diff --git a/client/html/prism.js b/client/html/prism.js index 734b60a673df2e5a033776e85065fda688893a3c..be1602a24bed84e2a015f33e8bba06f6b9b2ace2 100644 --- a/client/html/prism.js +++ b/client/html/prism.js @@ -168570,7 +168570,7 @@ return /******/ (function(modules) { // webpackBootstrap /* 1068 */ /***/ (function(module, exports) { - module.exports = {"layer_failed":"Failed to access layer '{{value}}'","search_error":"An error occurred during the search","search_norecords":"No products matched the search","search_n_layers_selected":"One layer 
selected to show","search_n_layers_selected_plural":"{{count}} layers selected to show","load_more":"Load {{count}} more","download_norecords":"No products selected for download","terms_and_conditions":"I have read and agree to the terms & conditions","start_download":"Start download of one product","start_download_plural":"Start download of {{count}} products","confirm_download":"'Download' starts the download of one product","confirm_download_plural":"'Download' starts the download of {{count}} products","confirm_note":"Note, the browser might open multiple confirmation dialogs or issue a warning. Besides, typically browsers have a limit of 6 concurrent connections. If popups are blocked by your browser, please enable them for this site.","download_size_warning":"The estimated size of the download without compression is {{estimated_size}}MB. This might exceed the capabilities of the service.","max_bbox_warning":"Maximum size of bounding box: {{max_bbox_size}} was exceeded by: {{max_bbox_exceed}} on axis: {{max_bbox_axis}}.","max_resolution_warning":"Maximum resolution: {{max_resolution}} was exceeded. Attempt to download might fail with an error.","download_bands_warning":"The selected format supports at most {{maxBands}} bands, but {{requestedBands}} are requested.","timefilter_warning":"Search results may differ from products shown on map because of using a separate time filter","advancedfilter_warning":"Search results may differ from products shown on map because of using additional filters","toomanyresults_warning":"Search results may differ from products shown on map because search results are too big","dynamic-histogram-title":"If enabled, time distributions are shown for current spatial filter.\nIf disabled, they are shown globally.","tutorial1":"

Welcome to the Catalog Client

Let's briefly walk through the main functionalities. Hit Next to start.

Feel free to Skip any time and to resume later using the button at the bottom right.

You can also find detailed information about the Client and referenced services here: User guide document HTML or PDF

.","tutorial2":"

The Map

Noticed the map in the back? It displays available satellite data.

Navigate to your area of interest by panning and zooming, either using left click + drag and mouse wheel scroll or one finger drag and two finger pinch.

Satisfied with the displayed data? Not yet? Let's change the time of interest Next.

","tutorial3":"

The Timeslider

The timeslider displays the distribution of data over time. The gray area shows the currently selected time interval.

Navigate to your time of interest again by panning (only in the bottom area) and zooming. Change the time interval selection by using left click + drag in the top area.

Did you notice how the data displayed on the map and the search results changed?

","tutorial4":"

Search Results

The satellite data within the current area and time of interest is listed here.

Inspect details by hovering over an item and hitting or select items for download.

Your result list is too long? Let's apply more filters Next.

","tutorial5":"

Filters

These filters allow you to narrow down the search results. Note that the time and spatial filters are already applied via the map and timeslider.

Apply Additional Filters by selecting or typing the values of interest.

Did the search result list get smaller?

","tutorial6":"

Map layers

Available map and data layers are listed here.

Select and deselect Layers for viewing and searching. Overlays and Base Layers are only for viewing on the map.

Satisfied with your search results?

Iterate all steps until you find the right satellite data to enjoy.

"} + module.exports = {"layer_failed":"Failed to access layer '{{value}}'","search_error":"An error occurred during the search","search_norecords":"No products matched the search","search_n_layers_selected":"One layer selected to show","search_n_layers_selected_plural":"{{count}} layers selected to show","load_more":"Load {{count}} more","download_norecords":"No products selected for download","terms_and_conditions":"I have read and agree to the terms & conditions","start_download":"Start download of one product","start_download_plural":"Start download of {{count}} products","confirm_download":"'Download' starts the download of one product","confirm_download_plural":"'Download' starts the download of {{count}} products","confirm_note":"Note, the browser might open multiple confirmation dialogs or issue a warning. Besides, typically browsers have a limit of 6 concurrent connections. If popups are blocked by your browser, please enable them for this site.","download_size_warning":"The estimated size of the download without compression is {{estimated_size}}MB. This might exceed the capabilities of the service.","max_bbox_warning":"Maximum size of bounding box: {{max_bbox_size}} was exceeded by: {{max_bbox_exceed}} on axis: {{max_bbox_axis}}.","max_resolution_warning":"Maximum resolution: {{max_resolution}} was exceeded. Attempt to download might fail with an error.","download_bands_warning":"The selected format supports at most {{maxBands}} bands, but {{requestedBands}} are requested.","timefilter_warning":"Search results may differ from products shown on map because of using a separate time filter","advancedfilter_warning":"Search results may differ from products shown on map because of using additional filters","toomanyresults_warning":"Search results may differ from products shown on map because search results are too big","dynamic-histogram-title":"If enabled, time distributions are shown for current spatial filter.\nIf disabled, they are shown globally.","tutorial1":"

Welcome to the Catalog Client

Let's briefly walk through the main functionalities. Hit Next to start.

Feel free to Skip any time and to resume later using the button at the bottom right.

You can also find detailed information about the Client and referenced services here: User guide document HTML or PDF

.","tutorial2":"

The Map

Noticed the map in the back? It displays available satellite data.

Navigate to your area of interest by panning and zooming, either using left click + drag and mouse wheel scroll or one finger drag and two finger pinch.

Satisfied with the displayed data? Not yet? Let's change the time of interest Next.

","tutorial3":"

The Timeslider

The timeslider displays the distribution of data over time. The gray area shows the currently selected time interval.

Navigate to your time of interest again by panning (only in the bottom area) and zooming. Change the time interval selection by using left click + drag in the top area.

Did you notice how the data displayed on the map and the search results changed?

","tutorial4":"

Search Results

The satellite data within the current area and time of interest is listed here.

Inspect details by hovering over an item and hitting or select items for download.

Your result list is too long? Let's apply more filters Next.

","tutorial5":"

Filters

These filters allow you to narrow down the search results. Note that the time and spatial filters are already applied via the map and timeslider.

Apply Additional Filters by selecting or typing the values of interest.

Did the search result list get smaller?

","tutorial6":"

Map layers

Available map and data layers are listed here.

Select and deselect Layers for viewing and searching. Overlays and Base Layers are only for viewing on the map.

Satisfied with your search results?

Iterate all steps until you find the right satellite data to enjoy.

"} /***/ }), /* 1069 */ diff --git a/config/dem_index-dev.html b/config/dem_index-dev.html index 6251a3c153b5323dbb962bc7aadea1607286b396..c4f904b8400dbd65a8698664ecd196cf7db3aa5b 100644 --- a/config/dem_index-dev.html +++ b/config/dem_index-dev.html @@ -3,6 +3,7 @@ + PRISM View Server diff --git a/config/dem_index-ops.html b/config/dem_index-ops.html index c8c6083111405529c2344632f30992da0df5df57..77b34692deedd764c8a770fbfbf0e33e91d5da18 100644 --- a/config/dem_index-ops.html +++ b/config/dem_index-ops.html @@ -3,6 +3,7 @@ + PRISM View Server diff --git a/config/dem_init-db.sh b/config/dem_init-db.sh index 648f4b051fbac1b0e3ff40c36aec93cb259256d1..cfc2c4ec310c545a05bd12d87fff5a562f8ca72f 100644 --- a/config/dem_init-db.sh +++ b/config/dem_init-db.sh @@ -42,7 +42,7 @@ if python3 manage.py id check "${COLLECTION}"; then python3 manage.py browsetype create "${COLLECTION}"_Product_COP-DEM_GLO-90-DGED --traceback \ --red "gray" \ --red-range -100 4000 \ - --red-nodata 0 + --red-nodata 0 python3 manage.py collectiontype create "${COLLECTION}"_Collection --traceback \ --coverage-type "float32_grayscale" \ @@ -66,22 +66,6 @@ if python3 manage.py id check "${COLLECTION}"; then echo "Provided collection '${COLLECTION}' not valid." fi - python3 manage.py storageauth create auth-cloud-ovh "${OS_AUTH_URL_SHORT}" \ - --type keystone \ - -p auth-version "${ST_AUTH_VERSION}" \ - -p identity-api-version="${ST_AUTH_VERSION}" \ - -p username "${OS_USERNAME}" \ - -p password "${OS_PASSWORD}" \ - -p tenant-name "${OS_TENANT_NAME}" \ - -p tenant-id "${OS_TENANT_ID}" \ - -p region-name "${OS_REGION_NAME}" - - python3 manage.py storage create \ - ${UPLOAD_CONTAINER} ${UPLOAD_CONTAINER} \ - --type swift \ - --storage-auth auth-cloud-ovh - - else echo "Using existing database" fi diff --git a/config/dem_registrar-config.yml b/config/dem_registrar-config.yml new file mode 100644 index 0000000000000000000000000000000000000000..5c255c039420827c238520095ca71f3f37460acb --- /dev/null +++ b/config/dem_registrar-config.yml @@ -0,0 +1,75 @@ +sources: + - type: swift + name: !env '${UPLOAD_CONTAINER}' + kwargs: + username: !env '${OS_USERNAME}' + password: !env '${OS_PASSWORD}' + tenant_name: !env '${OS_TENANT_NAME}' + tenant_id: !env '${OS_TENANT_ID}' + region_name: !env '${OS_REGION_NAME}' + auth_version: !env '${ST_AUTH_VERSION}' + auth_url: !env '${OS_AUTH_URL}' + auth_url_short: !env '${OS_AUTH_URL_SHORT}' + container: !env '${UPLOAD_CONTAINER}' + +schemes: + - type: gsc + kwargs: + level_re: '([A-Z0-9-_]+)/.*' + +backends: + - type: eoxserver + filter: + kwargs: + instance_base_path: /var/www/pvs/dev + instance_name: pvs_instance + mapping: + DEM1: + COP-DEM_EEA-10-DGED: + product_type_name: !env '${COLLECTION}_Product_COP-DEM_EEA-10-DGED' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_COP-DEM_EEA-10-DGED' + coverages: + DEM1: float32_grayscale + COP-DEM_EEA-10-INSP: + product_type_name: !env '${COLLECTION}_Product_COP-DEM_EEA-10-INSP' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_COP-DEM_EEA-10-INSP' + coverages: + DEM1: float32_grayscale + COP-DEM_GLO-30-DGED: + product_type_name: !env '${COLLECTION}_Product_COP-DEM_GLO-30-DGED' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_COP-DEM_GLO-30-DGED' + coverages: + DEM1: float32_grayscale + COP-DEM_GLO-90-DGED: + product_type_name: !env '${COLLECTION}_Product_COP-DEM_GLO-90-DGED' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_COP-DEM_GLO-90-DGED' + coverages: + DEM1: float32_grayscale + 
COP-DEM_GLO-30-DTED: + product_type_name: !env '${COLLECTION}_Product_COP-DEM_GLO-30-DTED' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_COP-DEM_GLO-30-DTED' + coverages: + DEM1: int16_grayscale + COP-DEM_GLO-90-DTED: + product_type_name: !env '${COLLECTION}_Product_COP-DEM_GLO-90-DTED' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_COP-DEM_GLO-90-DTED' + coverages: + DEM1: int16_grayscale + +post_handlers: + - path: registrar.post_handlers.ReportingPostHandler + kwargs: + service_url: dem.pass.copernicus.eu + reporting_dir: /mnt/reports/ diff --git a/config/emg_index-dev.html b/config/emg_index-dev.html index 9fdbd8e5a3ec122f5adc9cbcb8590a5d1f93d96e..09e3e675909e41269837eb5108c1b5b7c5f9fec5 100644 --- a/config/emg_index-dev.html +++ b/config/emg_index-dev.html @@ -3,6 +3,7 @@ + PRISM View Server diff --git a/config/emg_index-ops.html b/config/emg_index-ops.html index d53feee51897b8140659d17d89a3df538a5b746a..171adcd85091adc22d7482b443e0594a298be070 100644 --- a/config/emg_index-ops.html +++ b/config/emg_index-ops.html @@ -3,6 +3,7 @@ + PRISM View Server diff --git a/config/emg_init-db.sh b/config/emg_init-db.sh index 5944a22d495d77b97a2b2e7dfa9cee068742c984..7ce4d7e77ce45259a46dd32d869c66fc93c79bc7 100644 --- a/config/emg_init-db.sh +++ b/config/emg_init-db.sh @@ -975,23 +975,6 @@ if python3 manage.py id check "${COLLECTION}"; then echo "Provided collection '${COLLECTION}' not valid." fi - python3 manage.py storageauth create auth-cloud-ovh "${OS_AUTH_URL_SHORT}" \ - --type keystone \ - -p auth-version "${ST_AUTH_VERSION}" \ - -p identity-api-version="${ST_AUTH_VERSION}" \ - -p username "${OS_USERNAME}" \ - -p password "${OS_PASSWORD}" \ - -p tenant-name "${OS_TENANT_NAME}" \ - -p tenant-id "${OS_TENANT_ID}" \ - -p region-name "${OS_REGION_NAME}" - - python3 manage.py storage create \ - ${UPLOAD_CONTAINER} ${UPLOAD_CONTAINER} \ - --type swift \ - --storage-auth auth-cloud-ovh - - - else echo "Using existing database" fi \ No newline at end of file diff --git a/config/emg_registrar-config.yml b/config/emg_registrar-config.yml new file mode 100644 index 0000000000000000000000000000000000000000..e9103d8db2aa011cfbbdcff1e8e111246880e778 --- /dev/null +++ b/config/emg_registrar-config.yml @@ -0,0 +1,275 @@ +sources: + - type: swift + name: !env '${UPLOAD_CONTAINER}' + kwargs: + username: !env '${OS_USERNAME}' + password: !env '${OS_PASSWORD}' + tenant_name: !env '${OS_TENANT_NAME}' + tenant_id: !env '${OS_TENANT_ID}' + region_name: !env '${OS_REGION_NAME}' + auth_version: !env '${ST_AUTH_VERSION}' + auth_url: !env '${OS_AUTH_URL}' + auth_url_short: !env '${OS_AUTH_URL_SHORT}' + container: !env '${UPLOAD_CONTAINER}' + +schemes: + - type: gsc + +backends: + - type: eoxserver + filter: + kwargs: + instance_base_path: /var/www/pvs/dev + instance_name: pvs_instance + mapping: + CS00: + ~: + product_type_name: !env '${COLLECTION}_Product_CS00' + collections: + - !env '${COLLECTION}' + coverages: + CS00: sar_hh_gray + CS01: + ~: + product_type_name: !env '${COLLECTION}_Product_CS01' + collections: + - !env '${COLLECTION}' + coverages: + CS01: sar_hh_gray + CS02: + ~: + product_type_name: !env '${COLLECTION}_Product_CS02' + collections: + - !env '${COLLECTION}' + coverages: + CS02: sar_hh_gray + CS03: + ~: + product_type_name: !env '${COLLECTION}_Product_CS03' + collections: + - !env '${COLLECTION}' + coverages: + CS03: sar_hh_gray + CS04: + ~: + product_type_name: !env '${COLLECTION}_Product_CS04' + collections: + - !env '${COLLECTION}' + coverages: + CS04: 
sar_hh_gray + DM01: + ~: + product_type_name: !env '${COLLECTION}_Product_DM01' + collections: + - !env '${COLLECTION}' + coverages: + DM01: RGNirByte + DM02: + ~: + product_type_name: !env '${COLLECTION}_Product_DM02' + collections: + - !env '${COLLECTION}' + coverages: + DM02: RGBNir + EQ02_3: + ~: + product_type_name: !env '${COLLECTION}_Product_EQ02_3' + collections: + - !env '${COLLECTION}' + coverages: + EQ02_3: RGB + EQ02_4: + ~: + product_type_name: !env '${COLLECTION}_Product_EQ02_4' + collections: + - !env '${COLLECTION}' + coverages: + EQ02_4: RGBNir + EW01: + ~: + product_type_name: !env '${COLLECTION}_Product_EW01' + collections: + - !env '${COLLECTION}' + coverages: + EW01: grayscale + EW02_3: + ~: + product_type_name: !env '${COLLECTION}_Product_EW02_3' + collections: + - !env '${COLLECTION}' + coverages: + EW02_3: RGB + EW02_4: + ~: + product_type_name: !env '${COLLECTION}_Product_EW02_4' + collections: + - !env '${COLLECTION}' + coverages: + EW02_4: RGBNir + EW02_8: + ~: + product_type_name: !env '${COLLECTION}_Product_EW02_8' + collections: + - !env '${COLLECTION}' + coverages: + EW02_8: CBGYRReNirNir2 + EW03_3: + ~: + product_type_name: !env '${COLLECTION}_Product_EW03_3' + collections: + - !env '${COLLECTION}' + coverages: + EW03_3: RGB + EW03_4: + ~: + product_type_name: !env '${COLLECTION}_Product_EW03_4' + collections: + - !env '${COLLECTION}' + coverages: + EW03_4: RGBNir + EW03_8: + ~: + product_type_name: !env '${COLLECTION}_Product_EW03_8' + collections: + - !env '${COLLECTION}' + coverages: + EW03_8: CBGYRReNirNir2 + GE01_4: + ~: + product_type_name: !env '${COLLECTION}_Product_GE01_4' + collections: + - !env '${COLLECTION}' + coverages: + GE01_4: RGBNir + GE01_3: + ~: + product_type_name: !env '${COLLECTION}_Product_GE01_3' + collections: + - !env '${COLLECTION}' + coverages: + GE01_3: RGB + GE01_1: + ~: + product_type_name: !env '${COLLECTION}_Product_GE01_1' + collections: + - !env '${COLLECTION}' + coverages: + GE01_1: grayscale + GY01: + ~: + product_type_name: !env '${COLLECTION}_Product_GY01' + collections: + - !env '${COLLECTION}' + coverages: + GY01: RGBNir + IK02: + ~: + product_type_name: !env '${COLLECTION}_Product_IK02' + collections: + - !env '${COLLECTION}' + coverages: + IK02: RGBNir + KS03: + ~: + product_type_name: !env '${COLLECTION}_Product_KS03' + collections: + - !env '${COLLECTION}' + coverages: + KS03: RGBNir + PH1A: + ~: + product_type_name: !env '${COLLECTION}_Product_PH1A' + collections: + - !env '${COLLECTION}' + coverages: + PH1A: RGBNir + PH1B: + ~: + product_type_name: !env '${COLLECTION}_Product_PH1B' + collections: + - !env '${COLLECTION}' + coverages: + PH1B: RGBNir + RE00: + ~: + product_type_name: !env '${COLLECTION}_Product_RE00' + collections: + - !env '${COLLECTION}' + coverages: + RE00: BGRReNir + RS02_2: + ~: + product_type_name: !env '${COLLECTION}_Product_RS02_2' + collections: + - !env '${COLLECTION}' + coverages: + RS02_2: sar_hh_gray + RS02_3: + ~: + product_type_name: !env '${COLLECTION}_Product_RS02_3' + collections: + - !env '${COLLECTION}' + coverages: + RS02_3: sar_hh_vv_gray + RS02_7: + ~: + product_type_name: !env '${COLLECTION}_Product_RS02_7' + collections: + - !env '${COLLECTION}' + coverages: + RS02_7: sar_hh_hv_vh_vv_rgb + SP04: + ~: + product_type_name: !env '${COLLECTION}_Product_SP04' + collections: + - !env '${COLLECTION}' + coverages: + SP04: RGBNirByte + SP05: + ~: + product_type_name: !env '${COLLECTION}_Product_SP05' + collections: + - !env '${COLLECTION}' + coverages: + SP05: RGNirByte + SP06: + 
~: + product_type_name: !env '${COLLECTION}_Product_SP06' + collections: + - !env '${COLLECTION}' + coverages: + SP06: RGBNir + SP07: + ~: + product_type_name: !env '${COLLECTION}_Product_SP07' + collections: + - !env '${COLLECTION}' + coverages: + SP07: RGBNir + TX01_2: + ~: + product_type_name: !env '${COLLECTION}_Product_TX01_2' + collections: + - !env '${COLLECTION}' + coverages: + TX01_2: sar_hh_gray + TX01_3: + ~: + product_type_name: !env '${COLLECTION}_Product_TX01_3' + collections: + - !env '${COLLECTION}' + coverages: + TX01_3: sar_hh_vv_gray + TX01_7: + ~: + product_type_name: !env '${COLLECTION}_Product_TX01_7' + collections: + - !env '${COLLECTION}' + coverages: + TX01_7: sar_hh_hv_vh_vv_rgb + +post_handlers: + - path: registrar.post_handlers.ReportingPostHandler + kwargs: + service_url: emg.pass.copernicus.eu + reporting_dir: /mnt/reports/ diff --git a/config/shibboleth/attribute-map.xml b/config/shibboleth/attribute-map.xml new file mode 100755 index 0000000000000000000000000000000000000000..ddc1eeef9aebada40cd6b76e958c7127c1946cec --- /dev/null +++ b/config/shibboleth/attribute-map.xml @@ -0,0 +1,4 @@ + + + + diff --git a/config/shibboleth/dem-ac-cache.xml b/config/shibboleth/dem-ac-cache.xml new file mode 100644 index 0000000000000000000000000000000000000000..29cc048b186121be86f341375cebf85342976565 --- /dev/null +++ b/config/shibboleth/dem-ac-cache.xml @@ -0,0 +1,9 @@ + + + .+ + + Copernicus_Services Union_Inst Union_Research_Projects_space Union_Research_Projects_non-space TP_Data_Providers Data_Access_Services Ops_Space_Inf_Services Public_Auth Int_Org_NGO + + + diff --git a/config/shibboleth/dem-ac.xml b/config/shibboleth/dem-ac.xml new file mode 100644 index 0000000000000000000000000000000000000000..ef16ad42fa237e77b916f2e8bda6c78f8af2ca5d --- /dev/null +++ b/config/shibboleth/dem-ac.xml @@ -0,0 +1,9 @@ + + + .+ + + Copernicus_Services Union_Inst Union_Research_Projects_space Union_Research_Projects_non-space TP_Data_Providers Data_Access_Services Ops_Space_Inf_Services + + + diff --git a/config/shibboleth/dem-shibboleth2.xml b/config/shibboleth/dem-shibboleth2.xml new file mode 100755 index 0000000000000000000000000000000000000000..80564fad501cd585ae0f72b3d88f4ef9405e7280 --- /dev/null +++ b/config/shibboleth/dem-shibboleth2.xml @@ -0,0 +1,31 @@ + + + + + SAML2 + + SAML2 Local + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/config/shibboleth/emg-ac-cache.xml b/config/shibboleth/emg-ac-cache.xml new file mode 100644 index 0000000000000000000000000000000000000000..91372acfb7f6d0583d8048d4143fcd52dde38954 --- /dev/null +++ b/config/shibboleth/emg-ac-cache.xml @@ -0,0 +1,9 @@ + + + .+ + + Copernicus_Services Union_Inst Union_Research_Projects_space Union_Research_Projects_non-space Public_Auth + + + diff --git a/config/shibboleth/emg-ac.xml b/config/shibboleth/emg-ac.xml new file mode 100644 index 0000000000000000000000000000000000000000..91372acfb7f6d0583d8048d4143fcd52dde38954 --- /dev/null +++ b/config/shibboleth/emg-ac.xml @@ -0,0 +1,9 @@ + + + .+ + + Copernicus_Services Union_Inst Union_Research_Projects_space Union_Research_Projects_non-space Public_Auth + + + diff --git a/config/shibboleth/emg-shibboleth2.xml b/config/shibboleth/emg-shibboleth2.xml new file mode 100644 index 0000000000000000000000000000000000000000..a659df737b0c0413a0e3b030cd068e40af00cf54 --- /dev/null +++ b/config/shibboleth/emg-shibboleth2.xml @@ -0,0 +1,31 @@ + + + + + SAML2 + + SAML2 Local + + + + + + + + + + + + + + + + \ No newline at end of file diff --git 
a/config/shibboleth/index.html b/config/shibboleth/index.html new file mode 100644 index 0000000000000000000000000000000000000000..d1b182504b3959bb246951fad8f652018dd9572d --- /dev/null +++ b/config/shibboleth/index.html @@ -0,0 +1,11 @@ + + + + + Authentication Success + + +

Your login was successful and you were granted access to the service. + Please access the URL you originally requested. Proper redirection is not implemented yet.

+ + diff --git a/config/shibboleth/keygen.sh b/config/shibboleth/keygen.sh new file mode 100755 index 0000000000000000000000000000000000000000..b5378fd36f74c2c13f9c8328e79a393960fe0f60 --- /dev/null +++ b/config/shibboleth/keygen.sh @@ -0,0 +1,91 @@ +#! /bin/sh + +while getopts n:h:u:g:o:e:y:bf c + do + case $c in + u) USER=$OPTARG;; + g) GROUP=$OPTARG;; + o) OUT=$OPTARG;; + b) BATCH=1;; + f) FORCE=1;; + h) FQDN=$OPTARG;; + e) ENTITYID=$OPTARG;; + y) YEARS=$OPTARG;; + n) PREFIX=$OPTARG;; + \?) echo "keygen [-o output directory (default .)] [-u username to own keypair] [-g owning groupname] [-h hostname for cert] [-y years to issue cert] [-e entityID to embed in cert] [-n filename prefix (default 'sp')]" + exit 1;; + esac + done + +if [ -z "$OUT" ] ; then + OUT=. +fi + +if [ -z "$PREFIX" ]; then + PREFIX="sp" +fi + +if [ -n "$FORCE" ] ; then + rm $OUT/${PREFIX}-key.pem $OUT/${PREFIX}-cert.pem +fi + +if [ -s $OUT/${PREFIX}-key.pem -o -s $OUT/${PREFIX}-cert.pem ] ; then + if [ -z "$BATCH" ] ; then + echo The files $OUT/${PREFIX}-key.pem and/or $OUT/${PREFIX}-cert.pem already exist! + echo Use -f option to force recreation of keypair. + exit 2 + fi + exit 0 +fi + +if [ -z "$FQDN" ] ; then + FQDN=`hostname` +fi + +if [ -z "$YEARS" ] ; then + YEARS=10 +fi + +DAYS=`expr $YEARS \* 365` + +if [ -z "$ENTITYID" ] ; then + ALTNAME=DNS:$FQDN +else + ALTNAME=DNS:$FQDN,URI:$ENTITYID +fi + +SSLCNF=$OUT/${PREFIX}-cert.cnf +cat >$SSLCNF < /dev/null +fi +rm $SSLCNF + +if [ -s $OUT/${PREFIX}-key.pem -a -n "$USER" ] ; then + chown $USER $OUT/${PREFIX}-key.pem $OUT/${PREFIX}-cert.pem +fi + +if [ -s $OUT/${PREFIX}-key.pem -a -n "$GROUP" ] ; then + chgrp $GROUP $OUT/${PREFIX}-key.pem $OUT/${PREFIX}-cert.pem +fi diff --git a/config/shibboleth/native.logger b/config/shibboleth/native.logger new file mode 100644 index 0000000000000000000000000000000000000000..1a854391ae2d4e2386f10c706d4736a423a432d6 --- /dev/null +++ b/config/shibboleth/native.logger @@ -0,0 +1,41 @@ +# set overall behavior +log4j.rootCategory=INFO, native_log + +# fairly verbose for DEBUG, so generally leave at WARN/INFO +log4j.category.XMLTooling.XMLObject=WARN +log4j.category.XMLTooling.KeyInfoResolver=WARN +log4j.category.Shibboleth.IPRange=WARN +log4j.category.Shibboleth.PropertySet=WARN + +# raise for low-level tracing of SOAP client HTTP/SSL behavior +log4j.category.XMLTooling.libcurl=WARN + +# useful categories to tune independently: +# +# tracing of SAML messages and security policies +#log4j.category.OpenSAML.MessageDecoder=DEBUG +#log4j.category.OpenSAML.MessageEncoder=DEBUG +#log4j.category.OpenSAML.SecurityPolicyRule=DEBUG +# interprocess message remoting +#log4j.category.Shibboleth.Listener=DEBUG +# mapping of requests to applicationId +#log4j.category.Shibboleth.RequestMapper=DEBUG +# high level session cache operations +#log4j.category.Shibboleth.SessionCache=DEBUG +# persistent storage and caching +#log4j.category.XMLTooling.StorageService=DEBUG + +# define the appender + +log4j.appender.native_log=org.apache.log4j.RollingFileAppender +log4j.appender.native_log.fileName=/dev/stdout +log4j.appender.native_log.maxFileSize=0 +log4j.appender.native_log.maxBackupIndex=0 +log4j.appender.native_log.layout=org.apache.log4j.PatternLayout +log4j.appender.native_log.layout.ConversionPattern=sp-native %d{%Y-%m-%d %H:%M:%S} %p %c %x: %m%n + +#log4j.appender.warn_log=org.apache.log4j.RollingFileAppender +#log4j.appender.warn_log.fileName=/var/log/shibboleth-www/native_warn.log +#log4j.appender.warn_log.layout=org.apache.log4j.PatternLayout 
+#log4j.appender.warn_log.layout.ConversionPattern=%d{%Y-%m-%d %H:%M:%S} %p %c %x: %m%n +#log4j.appender.warn_log.threshold=WARN diff --git a/config/shibboleth/shib-apache.conf b/config/shibboleth/shib-apache.conf new file mode 100755 index 0000000000000000000000000000000000000000..8e5d486bda696885b9cb23f703aaff37b458f4f2 --- /dev/null +++ b/config/shibboleth/shib-apache.conf @@ -0,0 +1,41 @@ +LoadModule mod_shib /usr/lib64/shibboleth/mod_shib_24.so +ShibCompatValidUser On +UseCanonicalName On +DocumentRoot "/var/www/html" + + + SetHandler shib + + + + PassEnv APACHE_SERVERNAME + ServerName "${APACHE_SERVERNAME}" + + + Require valid-user + AuthType Basic + AuthBasicProvider file + AuthName "/secure" + AuthUserFile /run/secrets/BASIC_AUTH_USERS_AUTH + + + AuthType shibboleth + ShibRequestSetting requireSession 1 + Require shib-plugin /etc/shibboleth/pass-ac.xml + + + + + Require valid-user + AuthType Basic + AuthBasicProvider file + AuthName "/secure" + AuthUserFile /run/secrets/BASIC_AUTH_USERS_AUTH + + + AuthType shibboleth + ShibRequestSetting requireSession 1 + Require shib-plugin /etc/shibboleth/pass-ac-cache.xml + + + diff --git a/config/shibboleth/shibd.logger b/config/shibboleth/shibd.logger new file mode 100644 index 0000000000000000000000000000000000000000..909609dfcbb274cac05f838129cfc49e1ae2fd37 --- /dev/null +++ b/config/shibboleth/shibd.logger @@ -0,0 +1,75 @@ +# set overall behavior +log4j.rootCategory=INFO, shibd_log, warn_log + +# fairly verbose for DEBUG, so generally leave at INFO +log4j.category.XMLTooling.XMLObject=INFO +log4j.category.XMLTooling.KeyInfoResolver=INFO +log4j.category.Shibboleth.IPRange=INFO +log4j.category.Shibboleth.PropertySet=INFO + +# raise for low-level tracing of SOAP client HTTP/SSL behavior +log4j.category.XMLTooling.libcurl=INFO + +# useful categories to tune independently: +# +# tracing of SAML messages and security policies +#log4j.category.OpenSAML.MessageDecoder=DEBUG +#log4j.category.OpenSAML.MessageEncoder=DEBUG +#log4j.category.OpenSAML.SecurityPolicyRule=DEBUG +#log4j.category.XMLTooling.SOAPClient=DEBUG +# interprocess message remoting +#log4j.category.Shibboleth.Listener=DEBUG +# mapping of requests to applicationId +#log4j.category.Shibboleth.RequestMapper=DEBUG +# high level session cache operations +#log4j.category.Shibboleth.SessionCache=DEBUG +# persistent storage and caching +#log4j.category.XMLTooling.StorageService=DEBUG + +# logs XML being signed or verified if set to DEBUG +log4j.category.XMLTooling.Signature.Debugger=INFO, sig_log +log4j.additivity.XMLTooling.Signature.Debugger=false +log4j.ownAppenders.XMLTooling.Signature.Debugger=true + +# the tran log blocks the "default" appender(s) at runtime +# Level should be left at INFO for this category +log4j.category.Shibboleth-TRANSACTION=INFO, tran_log +log4j.additivity.Shibboleth-TRANSACTION=false +log4j.ownAppenders.Shibboleth-TRANSACTION=true + +# uncomment to suppress particular event types +#log4j.category.Shibboleth-TRANSACTION.AuthnRequest=WARN +#log4j.category.Shibboleth-TRANSACTION.Login=WARN +#log4j.category.Shibboleth-TRANSACTION.Logout=WARN + +# define the appenders + +log4j.appender.shibd_log=org.apache.log4j.RollingFileAppender +log4j.appender.shibd_log.fileName=/dev/stdout +log4j.appender.shibd_log.maxFileSize=0 +log4j.appender.shibd_log.maxBackupIndex=0 +log4j.appender.shibd_log.layout=org.apache.log4j.PatternLayout +log4j.appender.shibd_log.layout.ConversionPattern=sp-shibd %d{%Y-%m-%d %H:%M:%S} %p %c %x: %m%n + 
+#log4j.appender.warn_log=org.apache.log4j.RollingFileAppender +#log4j.appender.warn_log.fileName=/var/log/shibboleth/shibd_warn.log +#log4j.appender.warn_log.maxFileSize=0 +#log4j.appender.warn_log.maxBackupIndex=0 +#log4j.appender.warn_log.layout=org.apache.log4j.PatternLayout +#log4j.appender.warn_log.layout.ConversionPattern=%d{%Y-%m-%d %H:%M:%S} %p %c %x: %m%n +#log4j.appender.warn_log.threshold=WARN + +log4j.appender.tran_log=org.apache.log4j.RollingFileAppender +log4j.appender.tran_log.fileName=/dev/stdout +log4j.appender.tran_log.maxFileSize=0 +log4j.appender.tran_log.maxBackupIndex=0 +log4j.appender.tran_log.layout=org.apache.log4j.PatternLayout +log4j.appender.tran_log.layout.ConversionPattern=sp-transaction %d{%Y-%m-%d %H:%M:%S} %p %c %x: %m%n + +log4j.appender.sig_log=org.apache.log4j.FileAppender +log4j.appender.sig_log.fileName=/dev/stdout +log4j.appender.sig_log.maxFileSize=0 +log4j.appender.sig_log.maxBackupIndex=0 +log4j.appender.sig_log.layout=org.apache.log4j.PatternLayout +log4j.appender.sig_log.layout.ConversionPattern=sp-signature %m + diff --git a/config/shibboleth/vhr18-ac-cache.xml b/config/shibboleth/vhr18-ac-cache.xml new file mode 100644 index 0000000000000000000000000000000000000000..6a1528120ba8f56d59220d3d9a4cd4948044dafa --- /dev/null +++ b/config/shibboleth/vhr18-ac-cache.xml @@ -0,0 +1,9 @@ + + + .+ + + Copernicus_Services Union_Inst Union_Research_Projects_space Union_Research_Projects_non-space Public_Auth Int_Org_NGO Public + + + diff --git a/config/shibboleth/vhr18-ac.xml b/config/shibboleth/vhr18-ac.xml new file mode 100644 index 0000000000000000000000000000000000000000..2a0fb352d37a80065fc5b9100b99221d91b111ab --- /dev/null +++ b/config/shibboleth/vhr18-ac.xml @@ -0,0 +1,9 @@ + + + .+ + + Copernicus_Services Union_Inst Union_Research_Projects_space Union_Research_Projects_non-space Public_Auth Int_Org_NGO + + + diff --git a/config/shibboleth/vhr18-shibboleth2.xml b/config/shibboleth/vhr18-shibboleth2.xml new file mode 100644 index 0000000000000000000000000000000000000000..c063012bef8389f26d1d60466af9f72ee7754078 --- /dev/null +++ b/config/shibboleth/vhr18-shibboleth2.xml @@ -0,0 +1,31 @@ + + + + + SAML2 + + SAML2 Local + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/config/vhr18_index-dev.html b/config/vhr18_index-dev.html index 800b881ae8b57374b89d9125d8eb21c8eef65253..d517d3344f24401b159566813e906c8faa4d1ef8 100644 --- a/config/vhr18_index-dev.html +++ b/config/vhr18_index-dev.html @@ -3,6 +3,7 @@ + PRISM View Server diff --git a/config/vhr18_index-ops.html b/config/vhr18_index-ops.html index 2fe8a273a36b070f0205689ac1e6602f70d85081..7000f3f1495c517a36b7377b069d260c462f9544 100644 --- a/config/vhr18_index-ops.html +++ b/config/vhr18_index-ops.html @@ -3,6 +3,7 @@ + PRISM View Server diff --git a/config/vhr18_init-db.sh b/config/vhr18_init-db.sh index 46c0974298ad602e0784d02dc6a0ca707f5430c0..3f1451dfd7a0e3afe7286672fc546bdbdfe75fab 100644 --- a/config/vhr18_init-db.sh +++ b/config/vhr18_init-db.sh @@ -401,22 +401,6 @@ if python3 manage.py id check "${COLLECTION}"; then echo "Provided collection '${COLLECTION}' not valid." 
fi - python3 manage.py storageauth create auth-cloud-ovh "${OS_AUTH_URL_SHORT}" \ - --type keystone \ - -p auth-version "${ST_AUTH_VERSION}" \ - -p identity-api-version="${ST_AUTH_VERSION}" \ - -p username "${OS_USERNAME}" \ - -p password "${OS_PASSWORD}" \ - -p tenant-name "${OS_TENANT_NAME}" \ - -p tenant-id "${OS_TENANT_ID}" \ - -p region-name "${OS_REGION_NAME}" - - python3 manage.py storage create \ - ${UPLOAD_CONTAINER} ${UPLOAD_CONTAINER} \ - --type swift \ - --storage-auth auth-cloud-ovh - - else echo "Using existing database" fi diff --git a/config/vhr18_registrar-config.yml b/config/vhr18_registrar-config.yml new file mode 100644 index 0000000000000000000000000000000000000000..cee5f0f3edf66fea4f6d783084d268cbe83dd0ca --- /dev/null +++ b/config/vhr18_registrar-config.yml @@ -0,0 +1,220 @@ +sources: + - type: swift + name: !env '${UPLOAD_CONTAINER}' + kwargs: + username: !env '${OS_USERNAME}' + password: !env '${OS_PASSWORD}' + tenant_name: !env '${OS_TENANT_NAME}' + tenant_id: !env '${OS_TENANT_ID}' + region_name: !env '${OS_REGION_NAME}' + auth_version: !env '${ST_AUTH_VERSION}' + auth_url: !env '${OS_AUTH_URL}' + auth_url_short: !env '${OS_AUTH_URL_SHORT}' + container: !env '${UPLOAD_CONTAINER}' + +schemes: + - type: gsc + +backends: + - type: eoxserver + filter: + kwargs: + instance_base_path: /var/www/pvs/dev + instance_name: pvs_instance + mapping: + PL00: + Level_1: + product_type_name: !env '${COLLECTION}_Product_PL00' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_1' + coverages: + PL00: RGBNir + masks: + validity: validity + Level_3: + product_type_name: !env '${COLLECTION}_Product_PL00' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_3' + coverages: + PL00: RGBNir + masks: + validity: validity + DM02: + Level_1: + product_type_name: !env '${COLLECTION}_Product_DM02' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_1' + coverages: + DM02: RGBNir + masks: + validity: validity + Level_3: + product_type_name: !env '${COLLECTION}_Product_DM02' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_3' + coverages: + DM02: RGBNir + masks: + validity: validity + KS03: + Level_1: + product_type_name: !env '${COLLECTION}_Product_KS03' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_1' + coverages: + KS03: RGBNir + masks: + validity: validity + Level_3: + product_type_name: !env '${COLLECTION}_Product_KS03' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_3' + coverages: + KS03: RGBNir + masks: + validity: validity + KS04: + Level_1: + product_type_name: !env '${COLLECTION}_Product_KS04' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_1' + coverages: + KS04: RGBNir + masks: + validity: validity + Level_3: + product_type_name: !env '${COLLECTION}_Product_KS04' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_3' + coverages: + KS04: RGBNir + masks: + validity: validity + PH1A: + Level_1: + product_type_name: !env '${COLLECTION}_Product_PH1A' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_1' + coverages: + PH1A: RGBNir + masks: + validity: validity + Level_3: + product_type_name: !env '${COLLECTION}_Product_PH1A' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_3' + coverages: + PH1A: RGBNir + masks: + validity: validity + PH1B: + Level_1: + product_type_name: !env '${COLLECTION}_Product_PH1B' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_1' + coverages: 
+ PH1B: RGBNir + masks: + validity: validity + Level_3: + product_type_name: !env '${COLLECTION}_Product_PH1B' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_3' + coverages: + PH1B: RGBNir + masks: + validity: validity + SP06: + Level_1: + product_type_name: !env '${COLLECTION}_Product_SP06' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_1' + coverages: + SP06: RGBNir + masks: + validity: validity + Level_3: + product_type_name: !env '${COLLECTION}_Product_SP06' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_3' + coverages: + SP06: RGBNir + masks: + validity: validity + SP07: + Level_1: + product_type_name: !env '${COLLECTION}_Product_SP07' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_1' + coverages: + SP07: RGBNir + masks: + validity: validity + Level_3: + product_type_name: !env '${COLLECTION}_Product_SP07' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_3' + coverages: + SP07: RGBNir + masks: + validity: validity + SW00: + Level_1: + product_type_name: !env '${COLLECTION}_Product_SW00' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_1' + coverages: + SW00: RGBNir + masks: + validity: validity + Level_3: + product_type_name: !env '${COLLECTION}_Product_SW00' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_3' + coverages: + SW00: RGBNir + masks: + validity: validity + TR00: + Level_1: + product_type_name: !env '${COLLECTION}_Product_TR00' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_1' + coverages: + TR00: RGBNir + masks: + validity: validity + Level_3: + product_type_name: !env '${COLLECTION}_Product_TR00' + collections: + - !env '${COLLECTION}' + - !env '${COLLECTION}_Level_3' + coverages: + TR00: RGBNir + masks: + validity: validity + +post_handlers: + - path: registrar.post_handlers.ReportingPostHandler + kwargs: + service_url: vhr18.pass.copernicus.eu + reporting_dir: /mnt/reports/ diff --git a/core/Dockerfile b/core/Dockerfile index 4bdce96b93bfd307939fd8fb122f54b4ca933c62..70fb15f1662252baaec7cc3b2e7e82520bbc77de 100644 --- a/core/Dockerfile +++ b/core/Dockerfile @@ -31,7 +31,7 @@ LABEL name="prism view server core" \ vendor="EOX IT Services GmbH " \ license="MIT Copyright (C) 2019 EOX IT Services GmbH " \ type="prism view server core" \ - version="0.0.1-dev" + version="1.0.0-rc.2" USER root @@ -43,7 +43,7 @@ RUN apt update && \ rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* RUN pip3 install . 
&& \ - pip3 install python-keystoneclient python-swiftclient redis + pip3 install python-keystoneclient python-swiftclient redis click setuptools jsonschema boto3 ENV INSTANCE_ID="prism-view-server_core" \ INSTANCE_NAME="pvs_instance"\ @@ -77,12 +77,22 @@ ADD rgbnir_definition.json \ configure.sh \ run-httpd.sh \ run-registrar.sh \ - registrar.py \ entrypoint.sh \ wait-initialized.sh \ initialized.sh \ / +RUN mkdir /registrar +ADD registrar/ \ + /registrar/registrar + +ADD setup.py \ + /registrar + +RUN cd /registrar && \ + ls && \ + python3 setup.py install + RUN chmod -v +x \ /configure.sh \ /run-registrar.sh \ diff --git a/core/registrar.py b/core/registrar.py deleted file mode 100644 index 60b805e36dd57358854682339fdf4e1fcb257d6f..0000000000000000000000000000000000000000 --- a/core/registrar.py +++ /dev/null @@ -1,498 +0,0 @@ -#!/usr/bin/env python -# ----------------------------------------------------------------------------- -# -# Project: registrar.py -# Authors: Stephan Meissl -# -# ----------------------------------------------------------------------------- -# Copyright (c) 2019 EOX IT Services GmbH -# -# Python script to register products. -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies of this Software or works derived from this Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
-# ----------------------------------------------------------------------------- - - -import sys -import os -import argparse -import textwrap -import logging -import traceback -from xml.sax.saxutils import escape -import subprocess - -import redis -import lxml.etree -from swiftclient.service import SwiftService - -import django -from django.db import transaction -from django.contrib.gis.geos import GEOSGeometry -from osgeo import gdal - -path = os.path.join(os.getenv('INSTALL_DIR', "/var/www/pvs"), "pvs_instance") -if path not in sys.path: - sys.path.append(path) - -os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pvs_instance.settings") -django.setup() - -from eoxserver.backends import access -from eoxserver.contrib import vsi -from eoxserver.backends import models as backends -from eoxserver.core.util.timetools import isoformat -from eoxserver.resources.coverages import models -from eoxserver.resources.coverages.registration.product import ( - ProductRegistrator -) -from eoxserver.resources.coverages.registration.registrators.gdal import ( - GDALRegistrator -) - -logger = logging.getLogger(__name__) - -def setup_logging(verbosity): - # start logging setup - # get command line level - verbosity = verbosity - if verbosity == 0: - level = logging.CRITICAL - elif verbosity == 1: - level = logging.ERROR - elif verbosity == 2: - level = logging.WARNING - elif verbosity == 3: - level = logging.INFO - else: - level = logging.DEBUG - logger.setLevel(level) - sh = logging.StreamHandler() - sh.setLevel(level) - formatter = logging.Formatter("%(asctime)s %(levelname)s: %(message)s") - sh.setFormatter(formatter) - logger.addHandler(sh) - # finished logging setup - - -def set_gdal_swift_auth(): - # parsing command line output of swift auth - auth_keys = subprocess.check_output(["swift", "auth"]).decode(sys.stdout.encoding).split("\n") - storage_url = auth_keys[0].split("OS_STORAGE_URL=")[1] - auth_token = auth_keys[1].split("OS_AUTH_TOKEN=")[1] - # setting gdal config - gdal.SetConfigOption("SWIFT_STORAGE_URL", storage_url) - gdal.SetConfigOption("SWIFT_AUTH_TOKEN", auth_token) - - -def add_mask(product): - metadata_item = product.metadata_items.all()[0] - with access.vsi_open(metadata_item) as f: - tree = lxml.etree.parse(f) - root = tree.getroot() - wkt = tree.xpath( - '//gsc:opt_metadata/gml:metaDataProperty/gsc:EarthObservationMetaData/eop:vendorSpecific/eop:SpecificInformation[eop:localAttribute/text() = "CF_POLY"]/eop:localValue/text()', - namespaces=root.nsmap - )[0] - geometry = GEOSGeometry(wkt) - mask_type = models.MaskType.objects.get(product_type=product.product_type) - logger.debug("Adding mask") - models.Mask.objects.create( - product=product, - mask_type=mask_type, - geometry=geometry, - ) - - -def get_product_type_and_level(metadata_item): - level = None - with access.vsi_open(metadata_item) as f: - tree = lxml.etree.parse(f) - root = tree.getroot() - - try: - xp = '//gml:using/eop:EarthObservationEquipment/eop:platform/eop:Platform/eop:shortName/text()' - product_type_name = tree.xpath(xp, namespaces=root.nsmap)[0] - except Exception as e: - logger.debug( - 'Failed to determine product type of %s, error was %s' - % (metadata_item.location, e) - ) - - try: - xp = '//gml:metaDataProperty/gsc:EarthObservationMetaData/eop:parentIdentifier/text()' - parent_identifier = tree.xpath(xp, namespaces=root.nsmap)[0] - - if parent_identifier.endswith('Level_1'): - level = 'Level_1' - if parent_identifier.endswith('Level_3'): - level = 'Level_3' - else: - raise Exception('Invalid parent identifier 
type name %s' % parent_identifier) - except Exception as e: - logger.debug( - 'Failed to determine product level of %s, error was %s' - % (metadata_item.location, e) - ) - - return product_type_name, level - - -def get_product_collection(metadata_file): - # in case collection needs to be determined from metadata - try: - if metadata_file.startswith("/vsiswift"): - set_gdal_swift_auth() - with vsi.open(metadata_file, "r") as f: - tree = lxml.etree.parse(f) - root = tree.getroot() - xp = '//gml:metaDataProperty/gsc:EarthObservationMetaData/eop:parentIdentifier/text()' - product_type_name = tree.xpath(xp, namespaces=root.nsmap) - extracted = product_type_name[0].split('/')[0] - return extracted - except Exception as e: - logger.debug( - 'Failed to determine product collection for metadata file %s, error was %s' - % (metadata_file, e) - ) - - -def get_product_type_from_band_count(product_type_name, file_path): - # get raster band count via gdal - logger.debug("Opening file using GDAL: %s" % file_path) - if file_path.startswith("/vsiswift"): - set_gdal_swift_auth() - src_ds = gdal.Open(file_path) - if src_ds is None: - raise RegistrationError("Band check: failed to open dataset: %s " % file_path) - # try to fetch product model with _bandcount - product_type_name_upd = "%s_%s" % (product_type_name, src_ds.RasterCount) - try: - product_type_model = models.ProductType.objects.get(name=product_type_name_upd) - return product_type_model - except models.ProductType.DoesNotExist: - raise RegistrationError("Product Type: '%s' was not found" % product_type_name_upd) - - -class RegistrationError(Exception): - pass - - -@transaction.atomic -def registrar( - collection_stack, - objects_prefix, upload_container=None, replace=False, client=None, registered_set_key=None, - reporting_dir=None, service_url=None - -): - logger.info("Starting registration of product '%s'." % objects_prefix) - - metadata_package, data_package = None, None - if not upload_container: - # assuming objects_prefix = bucket/itemname - upload_container = objects_prefix.partition("/")[0] - objects_prefix = objects_prefix.partition("/")[2] - with SwiftService() as swift: - list_parts_gen = swift.list( - container=upload_container, options={"prefix": objects_prefix}, - ) - for page in list_parts_gen: - if page["success"]: - for item in page["listing"]: - if item["name"].endswith(".xml"): - metadata_package = item["name"] - elif item["name"].endswith(".TIF") or \ - item["name"].endswith(".tif"): - data_package = item["name"] - elif not item["name"].endswith(".tar"): - raise RegistrationError( - "Product with objects prefix '%s' has " - "wrong content '%s'." - % (objects_prefix, item["name"]) - ) - else: - logger.error(page["error"]) - raise RegistrationError( - "No product found with objects prefix '%s'." - % objects_prefix - ) - - if metadata_package is None or data_package is None: - raise RegistrationError( - "Product with objects prefix '%s' has missing content." - % objects_prefix - ) - logger.debug("Found objects '%s' and '%s'." 
% (data_package, metadata_package)) - - storage = backends.Storage.objects.get(name=upload_container) - metadata_item = models.MetaDataItem(storage=storage, location=metadata_package) - - product_type, level = get_product_type_and_level(metadata_item) - if collection_stack == 'DEM': - # special for DEM files, collection name === product_type - gdal_metadata_file_path = "/vsiswift/%s/%s" % (upload_container, metadata_package) - product_type = get_product_collection(gdal_metadata_file_path) - logger.debug("Registering product") - product_type_name = "%s_Product_%s" % (collection_stack, product_type) - - try: - # first find product type by name from path - product_type_model = models.ProductType.objects.get(name=product_type_name) - except models.ProductType.DoesNotExist: - # if not found, maybe there are more product types with _bandcount suffix - gdal_file_path = "/vsiswift/%s/%s" % (upload_container, data_package) - product_type_model = get_product_type_from_band_count(product_type_name, gdal_file_path) - product_type_name = product_type_model.name - coverage_type_names = product_type_model.allowed_coverage_types.all() - if len(coverage_type_names) > 1: - logger.warning("More available 'CoverageType' found, selecting the first one.") - coverage_type_name = coverage_type_names[0].name - - product, replaced = ProductRegistrator().register( - metadata_locations=[[upload_container, - metadata_package, ], ], - type_name=product_type_name, - replace=replace, - extended_metadata=True, - mask_locations=None, - package_path=None, - simplify_footprint_tolerance=0.0001, # ~10meters - overrides={}, - ) - if product.footprint.empty: - product.delete() - raise RegistrationError("No footprint was extracted. full product: %s" % product) - - collection = models.Collection.objects.get( - identifier=collection_stack - ) - logger.debug("Inserting product into collection %s" % collection_stack) - models.collection_insert_eo_object(collection, product) - - if collection_stack == "DEM": - # also insert it to its own collection - collection_own = models.Collection.objects.get( - identifier="%s_%s" % (collection, product_type) - ) - logger.debug("Inserting product to collection %s_%s" % (collection, product_type)) - models.collection_insert_eo_object(collection_own, product) - - if level == 'Level_1': - collection_level_1 = models.Collection.objects.get( - identifier="%s_Level_1" % collection - ) - logger.debug("Inserting product to collection %s_Level_1" % collection) - models.collection_insert_eo_object(collection_level_1, product) - elif level == 'Level_3': - collection_level_3 = models.Collection.objects.get( - identifier="%s_Level_3" % collection - ) - logger.debug("Inserting product to collection %s_Level_3" % collection) - models.collection_insert_eo_object(collection_level_3, product) - - logger.debug("Registering coverage") - report = GDALRegistrator().register( - data_locations=[[upload_container, data_package, ], ], - metadata_locations=[[upload_container, - metadata_package, ], ], - coverage_type_name=coverage_type_name, - overrides={ - "identifier": "%s__coverage" % product.identifier, - "footprint": None, - }, - replace=replace, - ) - logger.debug("Adding coverage to product") - models.product_add_coverage(product, report.coverage) - - try: - add_mask(product) - except Exception as e: - logger.debug("Couldn't add mask.") - logger.debug(traceback.format_exc()) - logger.debug("%s: %s\n" % (type(e).__name__, str(e))) - - if client is not None: - logger.debug( - "Storing times in redis queue '%s" % 
registered_set_key - ) - client.sadd( - registered_set_key, "%s/%s" - % ( - product.begin_time.strftime("%Y%m%dT%H%M%S"), - product.end_time.strftime("%Y%m%dT%H%M%S") - ) - ) - - timestamp = product.inserted.strftime("%Y%m%dT%H%M%S") - - if reporting_dir is not None: - with open(os.path.join(reporting_dir, 'item_%s_%s.xml' % (timestamp, product.identifier)),'w') as f: - f.write(textwrap.dedent(""" - - - {identifier} - {availability_time} - - WCS - {wms_capabilities_url} - - - WMS - {wcs_capabilities_url} - - - """.format( - identifier=escape(product.identifier), - availability_time=escape(isoformat(product.inserted)), - wcs_capabilities_url=escape( - '%s/ows?service=wcs&request=GetCapabilities&cql=identifier="%s"' - % (service_url, product.identifier) - ), - wms_capabilities_url=escape( - '%s/ows?service=wms&request=GetCapabilities&cql=identifier="%s"' - % (service_url, product.identifier) - ), - ))) - - logger.info( - "Successfully finished registration of product '%s'." % objects_prefix - ) - - -def registrar_redis_wrapper( - collection, - upload_container, - replace=False, host="localhost", port=6379, - register_queue_key="register_queue", - registered_set_key="registered_set", - reporting_dir=None, - service_url=None, -): - client = redis.Redis( - host=host, port=port, charset="utf-8", decode_responses=True - ) - while True: - logger.debug("waiting for redis queue '%s'..." % register_queue_key) - value = client.brpop(register_queue_key) - try: - registrar( - collection, - value[1], - upload_container, - replace=replace, - client=client, - registered_set_key=registered_set_key, - reporting_dir=reporting_dir, - service_url=service_url, - ) - except Exception as e: - logger.debug(traceback.format_exc()) - logger.error("%s: %s\n" % (type(e).__name__, str(e))) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser() - parser.description = textwrap.dedent("""\ - Register products. - """) - - parser.add_argument( - "--mode", default="standard", choices=["standard", "redis"], - help=( - "The mode to run the registrar. Either one-off (standard) or " - "reading from a redis queue." - ) - ) - parser.add_argument( - "--objects-prefix", default=None, - help=( - "Prefix to objects holding the metadata and data of product." - ) - ) - parser.add_argument( - "--replace", action="store_true", - help=( - "Replace existing products instead of skipping the registration." - ) - ) - parser.add_argument( - "--redis-register-queue-key", default="register_queue" - ) - parser.add_argument( - "--redis-registered-set-key", default="registered_set" - ) - parser.add_argument( - "--redis-host", default="localhost" - ) - parser.add_argument( - "--redis-port", type=int, default=6379 - ) - parser.add_argument( - "--reporting-dir", - ) - parser.add_argument( - "--service-url", - ) - - parser.add_argument( - "-v", "--verbosity", type=int, default=3, choices=[0, 1, 2, 3, 4], - help=( - "Set verbosity of log output " - "(4=DEBUG, 3=INFO, 2=WARNING, 1=ERROR, 0=CRITICAL). (default: 3)" - ) - ) - - arg_values = parser.parse_args() - - setup_logging(arg_values.verbosity) - - collection = os.environ.get('COLLECTION') - if collection is None: - logger.critical("Collection environment variable not set.") - sys.exit(1) - - upload_container = os.environ.get('UPLOAD_CONTAINER') - if upload_container is None: - logger.warn("UPLOAD_CONTAINER environment variable not set. 
Assuming part of path bucket/item") - - if arg_values.mode == "standard": - registrar( - collection, - arg_values.objects_prefix, - upload_container, - replace=arg_values.replace, - reporting_dir=arg_values.reporting_dir, - service_url=arg_values.service_url, - ) - else: - registrar_redis_wrapper( - collection, - upload_container, - replace=arg_values.replace, - host=arg_values.redis_host, - port=arg_values.redis_port, - register_queue_key=arg_values.redis_register_queue_key, - registered_set_key=arg_values.redis_registered_set_key, - reporting_dir=arg_values.reporting_dir, - service_url=arg_values.service_url, - ) diff --git a/core/registrar/__init__.py b/core/registrar/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/core/registrar/backend.py b/core/registrar/backend.py new file mode 100644 index 0000000000000000000000000000000000000000..17144f4753c8ee240388d6ecdf94d217110e47ed --- /dev/null +++ b/core/registrar/backend.py @@ -0,0 +1,256 @@ +import os +import re +import sys +import logging +from typing import List +import json + +import django +from django.db import transaction +from django.contrib.gis.geos import GEOSGeometry, Polygon + +from .exceptions import RegistrationError +from .context import Context +from .source import Source, LocalSource, S3Source, SwiftSource + + +logger = logging.getLogger(__name__) + + +class RegistrationResult: + pass + + +class Backend: + def register_item(self, item: Context) -> RegistrationResult: + raise NotImplementedError + + +class EOxServerBackend(Backend): + def __init__(self, instance_base_path: str, instance_name: str, mapping: dict, simplify_footprint_tolerance: int=None): + self.mapping = mapping + self.simplify_footprint_tolerance = simplify_footprint_tolerance + path = os.path.join(instance_base_path, instance_name) + if path not in sys.path: + sys.path.append(path) + + os.environ.setdefault("DJANGO_SETTINGS_MODULE", f"{instance_name}.settings") # TODO: from config + django.setup() + + def exists(self, source: Source, item: Context): + from eoxserver.resources.coverages import models + return models.Product.objects.filter(identifier=item.identifier).exists() + + def _get_storage_from_source(self, source: Source, path: str) -> list: + from eoxserver.backends import models as backends + + created_storage_auth = False + created_storage = False + storage_name = None + if isinstance(source, LocalSource): + storage, created_storage = backends.Storage.get_or_create( + name=source.name, + url=source.root_directory, + storage_type='local', + ) + storage_name = storage.name + + elif isinstance(source, S3Source): + params = json.dumps({ + 'ACCESS_KEY_ID': source.access_key_id, + 'SECRET_ACCESS_KEY': source.secret_access_key, + }) + + endpoint_url = source.endpoint_url + if endpoint_url.startswith('https://'): + endpoint_url = endpoint_url[len('https://'):] + elif endpoint_url.startswith('http://'): + endpoint_url = endpoint_url[len('http://'):] + + storage_auth, created_storage_auth = backends.StorageAuth.objects.get_or_create( + name=endpoint_url, + url=endpoint_url, + storage_auth_type='S3', + auth_parameters=params, + ) + + bucket, _ = source.get_container_and_path(path) + + storage, created_storage = backends.Storage.objects.get_or_create( + name=source.name if source.bucket_name else f'{source.name}-{bucket}', + url=bucket, + storage_type='S3', + storage_auth=storage_auth, + ) + storage_name = storage.name + + elif isinstance(source, SwiftSource): + params = 
json.dumps({ + 'auth-version': str(source.auth_version), + 'identity-api-version': str(source.auth_version), + 'username': source.username, + 'password': source.password, + 'tenant-name': source.tenant_name, + 'tenant-id': source.tenant_id, + 'region-name': source.region_name, + }) + + storage_auth, created_storage_auth = backends.StorageAuth.objects.get_or_create( + name=source.auth_url, + url=source.auth_url_short or source.auth_url, + storage_auth_type='keystone', + auth_parameters=params, + ) + + container, _ = source.get_container_and_path(path) + + storage, created_storage = backends.Storage.objects.get_or_create( + name=source.name if source.container else f'{source.name}-{container}', + url=container, + storage_type='swift', + storage_auth=storage_auth, + ) + storage_name = storage.name + + if created_storage_auth: + logger.info(f'Created storage auth for {source.name}') + if created_storage: + logger.info(f'Created storage for {source.name}') + + return [storage_name] if storage_name else [] + + @transaction.atomic + def register(self, source: Source, item: Context, replace: bool) -> RegistrationResult: + # ugly, ugly hack + from eoxserver.resources.coverages import models + from eoxserver.resources.coverages.registration.product import ProductRegistrator + from eoxserver.resources.coverages.registration.browse import BrowseRegistrator + from eoxserver.resources.coverages.registration.mask import MaskRegistrator + from eoxserver.resources.coverages.registration.registrators.gdal import GDALRegistrator + + # get the mapping for this particular item + type_mapping = self.mapping[item.product_type] + mapping = type_mapping.get(item.product_level) or type_mapping.get(None) + + if not mapping: + raise RegistrationError(f'Could not get mapping for {item.product_type} {item.product_level}') + + _, metadata_file = source.get_container_and_path(item.metadata_files[0]) + # metadata_file = '/'.join(item.metadata_files[0].split('/')[1:]) + storage = self._get_storage_from_source(source, item.path) + + try: + models.ProductType.objects.get(name=mapping['product_type_name']) + except models.ProductType.DoesNotExist: + pass + + if 'footprint' in item.metadata: + footprint = GEOSGeometry(item.metadata.pop('footprint')) + else: + footprint = None + + product, _ = ProductRegistrator().register( + metadata_locations=[storage + [metadata_file]], + type_name=mapping['product_type_name'], + replace=replace, + extended_metadata=True, + mask_locations=None, + package_path=None, + simplify_footprint_tolerance=self.simplify_footprint_tolerance, + overrides=dict( + identifier=item.identifier, + footprint=footprint, + **item.metadata + ), + ) + if not product.footprint or product.footprint.empty: + raise RegistrationError("No footprint was extracted. 
full product: %s" % product) + + # insert the product in the to be associated collections + for collection_id in mapping.get('collections', []): + collection = models.Collection.objects.get( + identifier=collection_id, + ) + models.collection_insert_eo_object(collection, product) + + # register coverages and link them to the product + for raster_identifier, coverage_type_name in mapping.get('coverages', {}).items(): + raster_items = item.raster_files.get(raster_identifier) + raster_items = [ + storage + [source.get_container_and_path(raster_item)[1]] + for raster_item in (raster_items if isinstance(raster_items, list) else [raster_items]) + ] + + logger.info(f"Registering coverage{'s' if len(raster_items) > 1 else ''} {raster_items} as {coverage_type_name}") + + report = GDALRegistrator().register( + data_locations=raster_items, + metadata_locations=[storage + [metadata_file]], + coverage_type_name=coverage_type_name, + overrides={ + "identifier": f'{product.identifier}__{raster_identifier}__coverage', + "footprint": None, + }, + replace=replace, + ) + logger.debug("Adding coverage to product") + models.product_add_coverage(product, report.coverage) + + # register browses + for raster_identifier, browse_type_name in mapping.get('browses', {}).items(): + raster_item = item.raster_files.get(raster_identifier) + + _, raster_item = source.get_container_and_path(raster_item) + logger.info(f"Adding browse {browse_type_name or 'default'} {raster_item} to product") + + BrowseRegistrator().register( + product.identifier, + storage + [raster_item], + browse_type_name, + ) + + # register masks + for mask_identifier, mask_type_name in mapping.get('masks', {}).items(): + _, mask_item = source.get_container_and_path(item.mask_files.get(mask_identifier)) + if mask_item: + logger.info(f"Adding mask (file) {mask_type_name} to product") + MaskRegistrator().register( + product.identifier, + storage + [mask_item], + mask_type_name, + ) + + mask_item = item.masks.get(mask_identifier) + if mask_item: + logger.info(f"Adding mask (geometry) {mask_type_name} to product") + models.Mask.objects.create( + product=product, + mask_type=models.MaskType.objects.get( + product_type=product.product_type, + name=mask_type_name, + ), + geometry=mask_item, + ) + + +BACKENDS = { + 'eoxserver': EOxServerBackend +} + +def get_backends(config: dict, path: str) -> List[Backend]: + cfg_backends = config['backends'] + + backends = [ + BACKENDS[cfg_backend['type']]( + *cfg_backend.get('args', []), + **cfg_backend.get('kwargs', {}), + ) + for cfg_backend in cfg_backends + if not cfg_backend.get('filter') or re.match(cfg_backend['filter'], path) + ] + + if not backends: + raise RegistrationError(f'Could not find a suitable backend for the path {path}') + + return backends + diff --git a/core/registrar/cli.py b/core/registrar/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..0a2943ffa33a7c93227e2ce43e5faf4ac687d35f --- /dev/null +++ b/core/registrar/cli.py @@ -0,0 +1,81 @@ +from os.path import join, dirname +import logging.config +import json + +import click +import yaml +import jsonschema + +from .registrar import register_file +from .daemon import run_daemon +from .config import load_config + + +def setup_logging(debug=False): + logging.config.dictConfig({ + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'brief': { + 'format': '%(levelname)s %(name)s: %(message)s' + } + }, + 'handlers': { + 'console': { + 'class': 'logging.StreamHandler', + 'level': 'DEBUG' if debug else 'INFO', + 
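The mapping passed to EOxServerBackend is keyed first by product type and then by product level (a None key acts as the catch-all), and each entry names the EOxServer product type plus the collections, coverages, browses and masks to attach. A minimal sketch of such a mapping; the product type, level and collection names below are purely illustrative, not from the repository:

# Illustrative only: type, level and collection names are invented.
mapping = {
    'PL00': {                                   # Context.product_type from the registration scheme
        'Level_1': {                            # Context.product_level; a None key serves as the fallback
            'product_type_name': 'PL00_Product',
            'collections': ['Emergency', 'Emergency_Level_1'],
            'coverages': {'PL00': 'RGBNir'},    # raster identifier -> coverage type name
            'browses': {'PL00': None},          # raster identifier -> browse type (None selects the default)
            'masks': {'validity': 'validity'},  # mask identifier -> mask type name
        },
    },
}
# register() resolves the entry as:
#   type_mapping = self.mapping[item.product_type]
#   entry = type_mapping.get(item.product_level) or type_mapping.get(None)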
'formatter': 'brief', + } + }, + 'root': { + 'handlers': ['console'], + 'level': 'DEBUG' if debug else 'INFO', + } + }) + + +def validate_config(config): + with open(join(dirname(__file__), 'config-schema.yaml')) as f: + schema = yaml.load(f) + + jsonschema.validate(config, schema) + + +@click.group() +def cli(): + pass + + +@cli.command(help='Run the registrar daemon, attaching to a Redis queue') +@click.option('--config-file', type=click.File('r')) +@click.option('--validate/--no-validate', default=False) +@click.option('--replace/--no-replace', default=False) +@click.option('--host', type=str) +@click.option('--port', type=int) +@click.option('--listen-queue', type=str) +@click.option('--registered-set-key', type=str) +@click.option('--debug/--no-debug', default=False) +def daemon(config_file=None, validate=False, replace=False, host=None, port=None, listen_queue=None, registered_set_key=None, debug=False): + setup_logging(debug) + config = load_config(config_file) + if validate: + validate_config(config) + run_daemon(config, replace, host, port, listen_queue, registered_set_key) + + +@cli.command(help='Run a single, one-off registration') +@click.argument('file_path', type=str) +@click.option('--config-file', type=click.File('r')) +@click.option('--validate/--no-validate', default=False) +@click.option('--replace/--no-replace', default=False) +@click.option('--debug/--no-debug', default=False) +def register(file_path, config_file=None, validate=False, replace=False, debug=False): + setup_logging(debug) + config = load_config(config_file) + if validate: + validate_config(config) + + register_file(config, file_path, replace) + +if __name__ == '__main__': + cli() diff --git a/core/registrar/config-schema.yaml b/core/registrar/config-schema.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c6d6659c1bade600b576ccf0f0593a25f704aaec --- /dev/null +++ b/core/registrar/config-schema.yaml @@ -0,0 +1,96 @@ +$id: https://example.com/address.schema.json +$schema: http://json-schema.org/draft-07/schema# +type: object +properties: + source: + description: Input sources definitions + type: array + items: + description: A single source definition + type: object + properties: + type: + description: The source type. + type: string + enum: ['local', 's3', 'swift'] + filter: + description: Optional filter to only be used for these paths + type: string + args: + description: Constructor arguments + type: array + kwargs: + description: Constructor keyword arguments + type: object + schemes: + description: Registration schemes definitions + type: array + items: + description: A single registration scheme definition + type: object + properties: + type: + description: The registration scheme type. + type: string + enum: ['gsc', 'sentinel-2'] + filter: + description: Optional filter to only be used for these paths + type: string + args: + description: Constructor arguments + type: array + kwargs: + description: Constructor keyword arguments + type: object + backends: + description: Registration backends definitions + type: array + items: + description: A single registration scheme definition + type: object + properties: + type: + description: The registration scheme type. 
+ type: string + enum: ['eoxserver'] + filter: + description: Optional filter to only be used for these paths + type: string + args: + description: Constructor arguments + type: array + kwargs: + description: Constructor keyword arguments + type: object + pre_handlers: + description: List of handlers to be run prior to the registration of an item. + type: array + items: + description: A single pre-registration handler + type: object + properties: + path: + description: Python module path to the registration handler + type: string + args: + description: arguments for the initialization of the handler + type: array + kwargs: + description: keyword arguments for the initialization of the handler + type: object + post_handlers: + description: List of handlers to be run after the registration of an item. + type: array + items: + description: A single post-registration handler + type: object + properties: + path: + description: Python module path to the registration handler + type: string + args: + description: arguments for the initialization of the handler + type: array + kwargs: + description: keyword arguments for the initialization of the handler + type: object diff --git a/core/registrar/config.py b/core/registrar/config.py new file mode 100644 index 0000000000000000000000000000000000000000..77534e9411ce9e598d059a53460d40565408870d --- /dev/null +++ b/core/registrar/config.py @@ -0,0 +1,39 @@ +import os +from typing import TextIO +import re + +import yaml + + +ENV_PATTERN = re.compile(r'.*?\${(\w+)}.*?') + +def constructor_env_variables(loader, node): + """ + Extracts the environment variable from the node's value + :param yaml.Loader loader: the yaml loader + :param node: the current node in the yaml + :return: the parsed string that contains the value of the environment + variable + """ + value = loader.construct_scalar(node) + match = ENV_PATTERN.findall(value) # to find all env variables in line + if match: + full_value = value + for g in match: + full_value = full_value.replace( + f'${{{g}}}', os.environ.get(g, g) + ) + return full_value + return value + + +def load_config(input_file: TextIO): + tag = '!env' + loader = yaml.SafeLoader + + # the tag will be used to mark where to start searching for the pattern + # e.g.
somekey: !env somestring${MYENVVAR}blah blah blah + loader.add_implicit_resolver(tag, ENV_PATTERN, None) + loader.add_constructor(tag, constructor_env_variables) + + return yaml.load(input_file, Loader=loader) diff --git a/core/registrar/context.py b/core/registrar/context.py new file mode 100644 index 0000000000000000000000000000000000000000..6384461116fa35526986998599c81eb1333a1666 --- /dev/null +++ b/core/registrar/context.py @@ -0,0 +1,14 @@ +from dataclasses import dataclass, field + + +@dataclass +class Context: + identifier: str + path: str + product_type: str = None + product_level: str = None + metadata: dict = field(default_factory=dict) + raster_files: dict = field(default_factory=dict) + metadata_files: dict = field(default_factory=dict) + masks: dict = field(default_factory=dict) + mask_files: dict = field(default_factory=dict) diff --git a/core/registrar/daemon.py b/core/registrar/daemon.py new file mode 100644 index 0000000000000000000000000000000000000000..efdf1ff565e5bda7c1b6bb80cd6465e027829cfe --- /dev/null +++ b/core/registrar/daemon.py @@ -0,0 +1,31 @@ +import logging +import json + +import redis + +from .registrar import register_file + + +logger = logging.getLogger(__name__) + + +def run_daemon(config, replace, host, port, listen_queue, registered_set_key): + """ Run the registrar daemon, listening on a redis queue + for files to be registered. After preprocessing the filename + of the registered files will be pushed to the output queue. + """ + # initialize the queue client + client = redis.Redis( + host=host, port=port, charset="utf-8", decode_responses=True + ) + logger.debug("waiting for redis queue '%s'..." % listen_queue) + while True: + # fetch an item from the queue to be registered + _, value = client.brpop(listen_queue) + # start the registration on that file + try: + item = register_file(config, value, replace) + client.sadd(registered_set_key, item.identifier) + + except Exception as e: + logger.exception(e) diff --git a/core/registrar/exceptions.py b/core/registrar/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..81a2e41ebfd0375fda00dbfc0a3ca6d4306ce5b0 --- /dev/null +++ b/core/registrar/exceptions.py @@ -0,0 +1,4 @@ + + +class RegistrationError(Exception): + pass diff --git a/core/registrar/post_handlers.py b/core/registrar/post_handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..322d77c4bc624ee48a3a28a4a2aabaea2cac03cb --- /dev/null +++ b/core/registrar/post_handlers.py @@ -0,0 +1,53 @@ +import os.path +import textwrap +from datetime import datetime +import logging + +from .context import Context +from .utils import isoformat +from .xml import escape + + +logger = logging.getLogger(__name__) + + +class ReportingPostHandler: + def __init__(self, service_url: str, reporting_dir: str): + self.service_url = service_url + self.reporting_dir = reporting_dir + + def __call__(self, config: dict, path: str, context: Context): + inserted = datetime.now() + timestamp = inserted.strftime("%Y%m%dT%H%M%S") + filename = os.path.join(self.reporting_dir, 'item_%s_%s.xml' % (timestamp, context.identifier)) + logger.info(f"Generating report for path {path} at {filename}") + with open(filename, 'w') as f: + f.write(textwrap.dedent(""" + + + {identifier} + {availability_time} + + WCS + {wms_capabilities_url} + + + WMS + {wcs_capabilities_url} + + + """.format( + identifier=escape(context.identifier), + availability_time=escape(isoformat(inserted)), + wcs_capabilities_url=escape( + 
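The !env resolver registered in config.py expands ${VAR} placeholders against the process environment while the YAML configuration is parsed. A minimal usage sketch, assuming an illustrative variable name and YAML snippet that are not taken from the repository:

import io
import os
from registrar.config import load_config

os.environ['UPLOAD_CONTAINER'] = 'my-swift-container'  # example value, set here only for the sketch
cfg = load_config(io.StringIO('source:\n  container: ${UPLOAD_CONTAINER}\n'))
# cfg == {'source': {'container': 'my-swift-container'}}
# an unset variable falls back to its own name via os.environ.get(g, g)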
'%s/ows?service=wcs&request=GetCapabilities&cql=identifier="%s"' + % (self.service_url, context.identifier) + ), + wms_capabilities_url=escape( + '%s/ows?service=wms&request=GetCapabilities&cql=identifier="%s"' + % (self.service_url, context.identifier) + ), + ))) diff --git a/core/registrar/registrar.py b/core/registrar/registrar.py new file mode 100644 index 0000000000000000000000000000000000000000..968ff05422939d68ec30bd83c3ad62f963a37091 --- /dev/null +++ b/core/registrar/registrar.py @@ -0,0 +1,63 @@ +import re +import logging +import importlib + +from .source import get_source +from .scheme import get_scheme +from .backend import get_backends +from .exceptions import RegistrationError + + +logger = logging.getLogger(__name__) + + +def register_file(config: dict, path: str, replace: bool=False): + """ Handle the registration of a single path. + """ + logger.info(f"Handling '{path}'.") + source = get_source(config, path) + scheme = get_scheme(config, path) + context = scheme.get_context(source, path) + + for pre_handler in get_pre_handlers(config): + pre_handler(config, path, context) + + for backend in get_backends(config, path): + if backend.exists(source, context): + if replace: + logger.info(f"Replacing '{path}'.") + backend.register(source, context, replace=True) + else: + raise RegistrationError(f'Object {context} is already registered') + else: + logger.info(f"Registering '{path}'.") + backend.register(source, context, replace=False) + + for post_handler in get_post_handlers(config): + post_handler(config, path, context) + + logger.info(f"Successfully {'replaced' if replace else 'registered'} '{path}'") + return context + + +def _get_handlers(config, name): + handlers = [] + for handler_def in config.get(name, []): + module_path, _, handler_name = handler_def['path'].rpartition('.') + handler_cls = getattr(importlib.import_module(module_path), handler_name) + handlers.append( + handler_cls( + *handler_def.get('args', []), + **handler_def.get('kwargs', {}), + ) + ) + + return handlers + + +def get_pre_handlers(config): + return _get_handlers(config, 'pre_handlers') + + +def get_post_handlers(config): + return _get_handlers(config, 'post_handlers') diff --git a/core/registrar/scheme.py b/core/registrar/scheme.py new file mode 100644 index 0000000000000000000000000000000000000000..71d508d350c8c4a724afb1870ae5cfc0d17ee439 --- /dev/null +++ b/core/registrar/scheme.py @@ -0,0 +1,208 @@ +import re +from os.path import join +import logging + +from .xml import read_xml, parse_metadata_schema, Parameter +from .context import Context +from .source import Source +from .exceptions import RegistrationError + + +logger = logging.getLogger(__name__) + +class RegistrationScheme: + def get_context(self): + raise NotImplementedError + + +def parse_datetime(value): + return value + + +def pairwise(iterable): + "s -> (s0,s1), (s2,s3), (s4, s5), ..."
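Pre- and post-handlers are plain callables instantiated from the configuration by _get_handlers() and invoked by register_file() with (config, path, context), exactly like ReportingPostHandler. A sketch of a hypothetical custom post-handler and the configuration entry that would wire it in; the module path and names are invented:

class NotifyPostHandler:
    """ Hypothetical handler: prints a line for every successfully registered item. """
    def __init__(self, prefix='registered'):
        self.prefix = prefix

    def __call__(self, config: dict, path: str, context):
        # called by register_file() after all backends have handled the item
        print(f"{self.prefix}: {context.identifier} ({path})")

# referenced from the registrar configuration as, for example:
# post_handlers:
#   - path: my_handlers.NotifyPostHandler
#     kwargs:
#       prefix: registered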
+ a = iter(iterable) + return zip(a, a) + + +def parse_footprint(value): + coord_list = ','.join( + f'{x} {y}' + for y, x in pairwise(value.split()) + ) + return f'POLYGON(({coord_list}))' + + +class Sentinel2RegistrationScheme(RegistrationScheme): + MTD_MSIL2A_SCHEMA = { + 'begin_time': Parameter('/n1:Level-2A_User_Product/n1:General_Info/Product_Info/PRODUCT_START_TIME/text()', False, parse_datetime), + 'end_time': Parameter('/n1:Level-2A_User_Product/n1:General_Info/Product_Info/PRODUCT_STOP_TIME/text()', False, parse_datetime), + 'identifier': Parameter('/n1:Level-2A_User_Product/n1:General_Info/Product_Info/PRODUCT_URI/text()'), + 'footprint': Parameter('/n1:Level-2A_User_Product/n1:Geometric_Info/Product_Footprint/Product_Footprint/Global_Footprint/EXT_POS_LIST/text()', False, parse_footprint), + 'level': Parameter('/n1:Level-2A_User_Product/n1:General_Info/Product_Info/PROCESSING_LEVEL/text()'), + 'type': Parameter('/n1:Level-2A_User_Product/n1:General_Info/Product_Info/PRODUCT_TYPE/text()'), + 'generation_time': Parameter('/n1:Level-2A_User_Product/n1:General_Info/Product_Info/GENERATION_TIME/text()', False, parse_datetime), + 'cloud_cover': Parameter('/n1:Level-2A_User_Product/n1:Quality_Indicators_Info/Cloud_Coverage_Assessment/text()'), + 'image_file_paths': Parameter('/n1:Level-2A_User_Product/n1:General_Info/Product_Info/Product_Organisation/Granule_List/Granule/IMAGE_FILE/text()', True), + } + + MTD_TL_SCHEMA = { + 'mask_file_paths': Parameter('/n1:Level-2A_Tile_ID/n1:Quality_Indicators_Info/Pixel_Level_QI/MASK_FILENAME/text()', True), + } + + MTD_MSIL2A_NAMESPACES = { + 'n1': "https://psd-14.sentinel2.eo.esa.int/PSD/User_Product_Level-2A.xsd" + } + + MTD_TL_NAMESPACES = { + 'n1': 'https://psd-14.sentinel2.eo.esa.int/PSD/S2_PDI_Level-2A_Tile_Metadata.xsd' + } + + def get_context(self, source: Source, path: str): + metadata_file = join(path, 'MTD_MSIL2A.xml') + tree = read_xml(source, metadata_file) + + # get product metadata + metadata = parse_metadata_schema(tree, self.MTD_MSIL2A_SCHEMA, self.MTD_MSIL2A_NAMESPACES) + + band_re = re.compile(r'.*([A-Z0-9]{3}_[0-9]{2}m)$') + raster_files = { + band_re.match(image_file_path).groups()[0]: f'{join(path, image_file_path)}.jp2' + for image_file_path in metadata['image_file_paths'] + } + + # get granule metadata + mtd_files = source.list_files(join(path, 'GRANULE'), '*/MTD_TL.xml') + logger.info(f'{mtd_files}') + tl_tree = read_xml(source, mtd_files[0]) + tile_metadata = parse_metadata_schema(tl_tree, self.MTD_TL_SCHEMA, self.MTD_TL_NAMESPACES) + + mask_type_re = re.compile(r'.*/MSK_([A-Z]*)_([A-Z0-9]{3}).[a-z0-9]+$') + mask_files = { + mask_type_re.match(mask_file_path).groups()[0]: join(path, mask_file_path) + for mask_file_path in tile_metadata['mask_file_paths'] + if mask_type_re.match(mask_file_path) is not None + } + + return Context( + identifier=metadata['identifier'], + path=path, + product_type=metadata['type'], + product_level=metadata['level'], + raster_files=raster_files, + mask_files=mask_files, + metadata_files=[metadata_file], + metadata={ + 'begin_time': metadata['begin_time'], + 'end_time': metadata['end_time'], + 'generation_time': metadata['generation_time'], + 'cloud_cover': metadata['cloud_cover'], + 'footprint': metadata['footprint'], + } + ) + + +def parse_ring(string): + raw_coords = string.split() + return [(lon, lat) for lat, lon in pairwise(raw_coords)] + + +def parse_polygons_gsc(elem): + def serialize_coord_list(coords): + return ','.join( + f'{x} {y}' for x, y in coords + ) + + interior = 
serialize_coord_list( + parse_ring( + elem.xpath( + "gml:exterior/gml:LinearRing/gml:posList", namespaces=elem.nsmap + )[0].text.strip() + ) + ) + + exteriors = [ + f'''({ + serialize_coord_list( + parse_ring(poslist_elem.text.strip()) + ) + })''' + for poslist_elem in elem.xpath( + "gml:interior/gml:LinearRing/gml:posList", namespaces=elem.nsmap + ) + ] + + return f"POLYGON(({interior}){',' if exteriors else ''}{','.join(exteriors)})" + + +class GSCRegistrationScheme(RegistrationScheme): + GSC_SCHEMA = { + 'identifier': Parameter('//gml:metaDataProperty/gsc:EarthObservationMetaData/eop:identifier/text()'), + 'type': Parameter('//gml:using/eop:EarthObservationEquipment/eop:platform/eop:Platform/eop:shortName/text()'), + 'level': Parameter('//gml:metaDataProperty/gsc:EarthObservationMetaData/eop:parentIdentifier/text()'), + 'mask': Parameter('//gsc:opt_metadata/gml:metaDataProperty/gsc:EarthObservationMetaData/eop:vendorSpecific/eop:SpecificInformation[eop:localAttribute/text() = "CF_POLY"]/eop:localValue/text()', True), + 'footprint': Parameter( + '//gml:target/eop:Footprint/gml:multiExtentOf/gml:MultiSurface/gml:surfaceMembers/gml:Polygon', + False, parse_polygons_gsc + ), + } + + def __init__(self, level_re: str=r'.*(Level_[0-9]+)$'): + self.level_re = level_re + + def get_context(self, source: Source, path: str) -> Context: + gsc_filenames = source.list_files(path, ['GSC*.xml', 'GSC*.XML']) + metadata_file = gsc_filenames[0] + + tree = read_xml(source, metadata_file) + metadata = parse_metadata_schema(tree, self.GSC_SCHEMA, tree.getroot().nsmap) + + tiff_files = { + metadata['type']: source.list_files(path, ['*.tif', '*.TIF']) + } + + match = re.match(self.level_re, metadata['level']) + if match: + level = match.groups()[0] + else: + level = None + + return Context( + identifier=metadata['identifier'], + path=path, + product_type=metadata['type'], + product_level=level, + raster_files=tiff_files, + masks={ + 'validity': metadata['mask'][0] if metadata['mask'] else None + }, + metadata_files=[metadata_file], + metadata={ + 'footprint': metadata['footprint'], + } + ) + + +REGISTRATION_SCHEMES = { + 'gsc': GSCRegistrationScheme, + 'sentinel-2': Sentinel2RegistrationScheme, +} + +def get_scheme(config: dict, path: str) -> RegistrationScheme: + cfg_schemes = config['schemes'] + + for cfg_scheme in cfg_schemes: + if cfg_scheme.get('filter'): + if re.match(cfg_scheme['filter'], path): + break + else: + break + else: + # no source found + raise RegistrationError(f'Could not find a suitable scheme for the path {path}') + + return REGISTRATION_SCHEMES[cfg_scheme['type']]( + *cfg_scheme.get('args', []), + **cfg_scheme.get('kwargs', {}), + ) + diff --git a/core/registrar/source.py b/core/registrar/source.py new file mode 100644 index 0000000000000000000000000000000000000000..ce04366251c1c3711d79e0c010ceba9dcf118403 --- /dev/null +++ b/core/registrar/source.py @@ -0,0 +1,256 @@ +import re +from os.path import normpath, join, isabs +import shutil +from glob import glob +from fnmatch import fnmatch +import logging + +import boto3 +from swiftclient.multithreading import OutputManager +from swiftclient.service import SwiftError, SwiftService + + +logger = logging.getLogger(__name__) + +class RegistrationError(Exception): + pass + + +class Source: + def __init__(self, name: str=None): + self.name = name + + def get_container_and_path(self, path): + raise NotImplementedError + + def list_files(self, path, glob_pattern=None): + raise NotImplementedError + + def get_file(self, path, target_path): + raise 
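get_scheme() walks the schemes list and returns the first entry whose filter regex matches the path; an entry without a filter matches anything, so order matters. A small sketch with illustrative filters and an invented path:

from registrar.scheme import get_scheme

config = {
    'schemes': [
        {'type': 'sentinel-2', 'filter': r'.*\.SAFE/?$'},  # Sentinel-2 SAFE products
        {'type': 'gsc'},                                    # unfiltered entry: fallback for everything else
    ],
}
scheme = get_scheme(config, 'upload/S2B_MSIL2A_example.SAFE')  # example path only
# -> Sentinel2RegistrationScheme(); scheme.get_context(source, path) then builds the Context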
NotImplementedError + + def get_vsi_env_and_path(self, path): + raise NotImplementedError + + +class SwiftSource(Source): + def __init__(self, name=None, username=None, password=None, tenant_name=None, + tenant_id=None, region_name=None, user_domain_id=None, + user_domain_name=None, auth_url=None, auth_url_short=None, + auth_version=None, container=None): + super().__init__(name) + + self.username = username + self.password = password + self.tenant_name = tenant_name + self.tenant_id = tenant_id + self.region_name = region_name + self.user_domain_id = user_domain_id + self.user_domain_name = user_domain_name + self.auth_url = auth_url + self.auth_url_short = auth_url_short + self.auth_version = auth_version # TODO: assume 3 + self.container = container + + def get_service(self): + return SwiftService(options={ + "os_username": self.username, + "os_password": self.password, + "os_tenant_name": self.tenant_name, + "os_tenant_id": self.tenant_id, + "os_region_name": self.region_name, + "os_auth_url": self.auth_url, + "auth_version": self.auth_version, + "os_user_domain_id": self.user_domain_id, + "os_user_domain_name": self.user_domain_name, + }) + + def get_container_and_path(self, path: str): + container = self.container + if container is None: + parts = (path[1:] if path.startswith('/') else path).split('/') + container, path = parts[0], '/'.join(parts[1:]) + + return container, path + + def list_files(self, path, glob_patterns=None): + container, path = self.get_container_and_path(path) + + if glob_patterns and not isinstance(glob_patterns, list): + glob_patterns = [glob_patterns] + + with self.get_service() as swift: + pages = swift.list( + container=container, + options={"prefix": path}, + ) + + filenames = [] + for page in pages: + if page["success"]: + # at least two files present -> pass validation + for item in page["listing"]: + if glob_patterns is None or any( + fnmatch(item['name'], join(path, glob_pattern)) for glob_pattern in glob_patterns): + + filenames.append( + item['name'] if self.container else join(container, item['name']) + ) + else: + raise page['error'] + + return filenames + + def get_file(self, path, target_path): + container, path = self.get_container_and_path(path) + + with self.get_service() as swift: + results = swift.download( + container, + [path], + options={ + 'out_file': target_path + } + ) + + for result in results: + if not result["success"]: + raise Exception('Failed to download %s' % path) + + def get_vsi_env_and_path(self, path): + container, path = self.get_container_and_path(path) + return { + 'OS_IDENTITY_API_VERSION': self.auth_version, + 'OS_AUTH_URL': self.auth_url, + 'OS_USERNAME': self.username, + 'OS_PASSWORD': self.password, + 'OS_USER_DOMAIN_NAME': self.user_domain_name, + # 'OS_PROJECT_NAME': self.tena, + # 'OS_PROJECT_DOMAIN_NAME': , + 'OS_REGION_NAME': self.region_name, + }, f'/vsiswift/{container}/{path}' + + +class S3Source(Source): + def __init__(self, name=None, bucket_name=None, secret_access_key=None, access_key_id=None, endpoint_url=None, strip_bucket=True, **client_kwargs): + super().__init__(name) + + # see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html#boto3.session.Session.client + # for client_kwargs + self.bucket_name = bucket_name + self.secret_access_key=secret_access_key + self.access_key_id=access_key_id + self.endpoint_url = endpoint_url + self.strip_bucket = strip_bucket + + self.client = boto3.client( + 's3', + aws_secret_access_key=secret_access_key, + 
aws_access_key_id=access_key_id, + endpoint_url=endpoint_url, + **client_kwargs, + ) + + def get_container_and_path(self, path: str): + bucket = self.bucket_name + if bucket is None: + parts = (path[1:] if path.startswith('/') else path).split('/') + bucket, path = parts[0], '/'.join(parts[1:]) + elif self.strip_bucket: + parts = (path[1:] if path.startswith('/') else path).split('/') + if parts[0] == bucket: + parts.pop(0) + path = '/'.join(parts) + + return bucket, path + + def list_files(self, path, glob_patterns=None): + if glob_patterns and not isinstance(glob_patterns, list): + glob_patterns = [glob_patterns] + + bucket, key = self.get_container_and_path(path) + logger.info(f'Listing S3 files for bucket {bucket} and prefix {key}') + response = self.client.list_objects_v2( + Bucket=bucket, + Prefix=key, + ) + + return [ + f"{bucket}/{item['Key']}" + for item in response['Contents'] + if glob_patterns is None or any( + fnmatch(item['Key'], glob_pattern) for glob_pattern in glob_patterns + ) + ] + + def get_file(self, path, target_path): + bucket, key = self.get_container_and_path(path) + logger.info(f'Retrieving file from S3 {bucket}/{key} to be stored at {target_path}') + self.client.download_file(bucket, key, target_path) + + def get_vsi_env_and_path(self, path: str, streaming: bool=False): + bucket, key = self.get_container_and_path(path) + return { + 'AWS_SECRET_ACCESS_KEY': self.secret_access_key, + 'AWS_ACCESS_KEY_ID': self.access_key_id, + 'AWS_S3_ENDPOINT': self.endpoint_url, + }, f'/{"vsis3" if not streaming else "vsis3_streaming"}/{bucket}/{key}' + + +class LocalSource(Source): + def __init__(self, name, root_directory): + super().__init__(name) + + self.root_directory = root_directory + + def get_container_and_path(self, path): + return (self.root_directory, path) + + def _join_path(self, path): + path = normpath(path) + if isabs(path): + path = path[1:] + + return join(self.root_directory, path) + + def list_files(self, path, glob_patterns=None): + if glob_patterns and not isinstance(glob_patterns, list): + glob_patterns = [glob_patterns] + + if glob_patterns is not None: + return glob(join(self._join_path(path), glob_patterns[0])) # TODO + else: + return glob(join(self._join_path(path), '*')) + + def get_file(self, path, target_path): + shutil.copy(self._join_path(path), target_path) + + def get_vsi_env_and_path(self, path): + return {}, self._join_path(path) + + +SOURCE_TYPES = { + 'swift': SwiftSource, + 's3': S3Source, + 'local': LocalSource, +} + + +def get_source(config: dict, path: str) -> Source: + cfg_sources = config['sources'] + + for cfg_source in cfg_sources: + if cfg_source.get('filter'): + if re.match(cfg_source['filter'], path): + break + else: + break + else: + # no source found + raise RegistrationError(f'Could not find a suitable source for the path {path}') + + return SOURCE_TYPES[cfg_source['type']]( + cfg_source['name'], + *cfg_source.get('args', []), + **cfg_source.get('kwargs', {}) + ) diff --git a/core/registrar/utils.py b/core/registrar/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8a522c91ae78540930fb6edce786ba131e40b3fe --- /dev/null +++ b/core/registrar/utils.py @@ -0,0 +1,12 @@ +def isoformat(dt): + """ Formats a datetime object to an ISO string. Timezone naive datetimes are + are treated as UTC Zulu. UTC Zulu is expressed with the proper "Z" + ending and not with the "+00:00" offset declaration. 
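get_source() applies the same filter logic to the sources list and instantiates the matching source class with the configured name, args and kwargs. A minimal sketch; bucket, directory and credential values are placeholders, not real settings:

from registrar.source import get_source

config = {
    'sources': [
        {'name': 'upload', 'type': 's3', 'filter': r'^upload/',
         'kwargs': {'bucket_name': 'upload-bucket',
                    'endpoint_url': 'https://s3.example.com',
                    'access_key_id': '<access-key>',
                    'secret_access_key': '<secret-key>'}},
        {'name': 'local', 'type': 'local', 'kwargs': {'root_directory': '/data'}},
    ],
}
source = get_source(config, 'products/some-item/')  # no filter matches, so the unfiltered 'local' entry wins
# -> LocalSource('local', root_directory='/data')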
+ + :param dt: the :class:`datetime.datetime` to encode + :returns: an encoded string + """ + if not dt.utcoffset(): + dt = dt.replace(tzinfo=None) + return dt.isoformat("T") + "Z" + return dt.isoformat("T") diff --git a/core/registrar/xml.py b/core/registrar/xml.py new file mode 100644 index 0000000000000000000000000000000000000000..8a25ff07c411ace15f75a5021261569055328841 --- /dev/null +++ b/core/registrar/xml.py @@ -0,0 +1,54 @@ +from os import remove +from os.path import join, basename +from tempfile import gettempdir, gettempprefix +from dataclasses import dataclass, field +from typing import Union, Type, Optional, List, Callable, Any +import logging +from xml.sax.saxutils import escape + +import lxml.etree + +from .source import Source +from .exceptions import RegistrationError + + +logger = logging.getLogger(__name__) + +def read_xml(source: Source, path: str) -> lxml.etree._ElementTree: + out_filename = join(gettempdir(), basename(path)) + try: + source.get_file(path, out_filename) + tree = lxml.etree.parse(out_filename) + finally: + remove(out_filename) + return tree + +@dataclass +class Parameter: + xpath: str + multi: bool = False + parser: Optional[Callable[[str], Any]] = None + namespaces: dict = field(default_factory=dict) + + +class ParserError(RegistrationError): + pass + +def parse_metadata_schema(tree: lxml.etree._ElementTree, schema: dict, namespaces: dict=None) -> dict: + out = {} + for key, param in schema.items(): + values = tree.xpath(param.xpath, namespaces=param.namespaces or namespaces) + if param.multi: + value = [ + param.parser(v) if param.parser else v + for v in values + ] + else: + try: + value = param.parser(values[0]) if param.parser else values[0] + except IndexError: + raise ParserError(f'Failed to fetch single value for parameter {key}') + + out[key] = value + + return out diff --git a/core/run-registrar.sh b/core/run-registrar.sh index 348b4f75081870185eb370e84b568f3f46254cd6..a1bae61792c574e203a50054ed4d1943f80dbb51 100644 --- a/core/run-registrar.sh +++ b/core/run-registrar.sh @@ -6,13 +6,10 @@ if test "$REGISTRAR_REPLACE" = true; then replace="--replace" fi -python3 /registrar.py \ - --mode redis \ - --redis-host ${REDIS_HOST} \ - --redis-port ${REDIS_PORT} \ - --redis-register-queue-key ${REDIS_REGISTER_QUEUE_KEY} \ - --redis-registered-set-key ${REDIS_REGISTERED_SET_KEY} \ - --redis-registered-set-key ${REDIS_REGISTERED_SET_KEY} \ - --reporting-dir ${REPORTING_DIR} \ - --service-url ${SERVICE_URL} \ +registrar daemon \ + --config-file /config.yaml \ + --host ${REDIS_HOST} \ + --port ${REDIS_PORT} \ + --listen-queue ${REDIS_REGISTER_QUEUE_KEY} \ + --registered-set-key ${REDIS_REGISTERED_SET_KEY} \ ${replace} >&2 diff --git a/core/setup.py b/core/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..b44d89b2d4b7ddd125b94a7f5ce15aaafaf3fc97 --- /dev/null +++ b/core/setup.py @@ -0,0 +1,28 @@ +from setuptools import setup, find_packages + +# with open("README.md", "r") as fh: +# long_description = fh.read() +long_description = "" + +setup( + name="registrar", # Replace with your own username + version="0.0.1", + author="", + author_email="", + description="registrar for PVS", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://gitlab.eox.at/esa/prism/vs/-/tree/master/core", + packages=find_packages(), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + ], + python_requires='>=3.6', + 
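Parameter and parse_metadata_schema are what the registration schemes use to pull values out of product metadata XML. A self-contained sketch on an invented document:

import lxml.etree
from registrar.xml import Parameter, parse_metadata_schema

tree = lxml.etree.fromstring(
    b'<product><id>ABC_123</id><band>B01</band><band>B02</band></product>'
).getroottree()

schema = {
    'identifier': Parameter('/product/id/text()'),
    'bands': Parameter('/product/band/text()', multi=True),
}
metadata = parse_metadata_schema(tree, schema)
# -> {'identifier': 'ABC_123', 'bands': ['B01', 'B02']}
# a missing single-valued parameter raises ParserError instead of silently returning None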
entry_points={ + "console_scripts": [ + "registrar = registrar.cli:cli", + ], + } +) diff --git a/docker-compose.base.ops.yml b/docker-compose.base.ops.yml index 5aac2ba8e5d0f251bae5a89d82edf3ca513da292..7b31c830c595c85fe1e0eeb81416300cc0feabdc 100644 --- a/docker-compose.base.ops.yml +++ b/docker-compose.base.ops.yml @@ -1,4 +1,5 @@ version: "3.6" +x-vs-version: :release-1.0.0-rc.2 # bumpversion services: reverse-proxy: image: traefik:2.1 @@ -19,7 +20,7 @@ services: environment: HTTP_PROXY: "http://172.30.252.68:3128" HTTPS_PROXY: "http://172.30.252.68:3128" - NO_PROXY: "172.0.0.0/8,192.168.0.0/16,10.0.0.0/8" + NO_PROXY: "172.0.0.0/8,192.168.0.0/16,10.0.0.0/8,shibauth" deploy: placement: constraints: [node.role == manager] @@ -46,4 +47,4 @@ secrets: BASIC_AUTH_USERS_APIAUTH: external: true BASIC_AUTH_USERS_AUTH: - external: true + external: true \ No newline at end of file diff --git a/docker-compose.dem.dev.yml b/docker-compose.dem.dev.yml index 56ee0072c3a2cf7c2f49eebd82eaac84504007b9..d39ce234c8f19ac600c14283c116d4c3ecf092e6 100644 --- a/docker-compose.dem.dev.yml +++ b/docker-compose.dem.dev.yml @@ -42,8 +42,6 @@ services: - type: bind source: ./core/ target: /core/ - logging: - driver: "fluentd" cache: image: registry.gitlab.eox.at/esa/prism/vs/pvs_cache:dev ports: diff --git a/docker-compose.dem.ops.yml b/docker-compose.dem.ops.yml index 6cccbfbdcba5d165994d37a0acea01ed4dec791a..5184b39dc967baa24794696f481d4d416919096f 100644 --- a/docker-compose.dem.ops.yml +++ b/docker-compose.dem.ops.yml @@ -1,4 +1,5 @@ version: "3.6" +x-vs-version: :release-1.0.0-rc.2 # bumpversion services: database: volumes: @@ -7,21 +8,22 @@ services: tmpfs: size: 536870912 renderer: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:release-1.0.0-rc.2 # bumpversion environment: INSTALL_DIR: "/var/www/pvs/ops/" INSTANCE_DIR: "/var/www/pvs/ops/pvs_instance/" deploy: labels: - # router for basic auth based access (https) - - "traefik.http.routers.dem-renderer.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`, `dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" - - "traefik.http.routers.dem-renderer.middlewares=auth@file,compress@file,cors@file" - - "traefik.http.routers.dem-renderer.tls=true" - - "traefik.http.routers.dem-renderer.tls.certresolver=default" - - "traefik.http.routers.dem-renderer.entrypoints=https" - # router for basic auth based access (http) - - "traefik.http.routers.dem-renderer-redirect.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`, `dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" - - "traefik.http.routers.dem-renderer-redirect.middlewares=redirect@file" - - 
"traefik.http.routers.dem-renderer-redirect.entrypoints=http" + # router for shib auth based access (https) + - "traefik.http.routers.dem-renderer-shib.rule=Host(`dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.dem-renderer-shib.middlewares=shibAuth@file,compress@file,cors@file" + - "traefik.http.routers.dem-renderer-shib.tls=true" + - "traefik.http.routers.dem-renderer-shib.tls.certresolver=default" + - "traefik.http.routers.dem-renderer-shib.entrypoints=https" + # router for shib auth based access (http) + - "traefik.http.routers.dem-renderer-redirect-shib.rule=Host(`dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.dem-renderer-redirect-shib.middlewares=redirect@file" + - "traefik.http.routers.dem-renderer-redirect-shib.entrypoints=http" # router for referrer based access (https) - "traefik.http.routers.dem-renderer_referer.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`, `dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|dem.pdas.prism.eox.at|dem.pass.copernicus.eu)/?`)" - "traefik.http.routers.dem-renderer_referer.middlewares=compress@file,cors@file" @@ -32,6 +34,16 @@ services: - "traefik.http.routers.dem-renderer_referer-redirect.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`, `dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|dem.pdas.prism.eox.at|dem.pass.copernicus.eu)/?`)" - "traefik.http.routers.dem-renderer_referer-redirect.middlewares=redirect@file" - "traefik.http.routers.dem-renderer_referer-redirect.entrypoints=http" + # router for basic auth based access (https) + - "traefik.http.routers.dem-renderer.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, 
`b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.dem-renderer.middlewares=auth@file,compress@file,cors@file" + - "traefik.http.routers.dem-renderer.tls=true" + - "traefik.http.routers.dem-renderer.tls.certresolver=default" + - "traefik.http.routers.dem-renderer.entrypoints=https" + # router for basic auth based access (http) + - "traefik.http.routers.dem-renderer-redirect.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.dem-renderer-redirect.middlewares=redirect@file" + - "traefik.http.routers.dem-renderer-redirect.entrypoints=http" # general - "traefik.http.services.dem-renderer.loadbalancer.sticky=false" - "traefik.http.services.dem-renderer.loadbalancer.server.port=80" @@ -48,22 +60,23 @@ services: networks: - extnet cache: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_cache:release-1.0.0-rc.2 # bumpversion configs: - source: mapcache-ops target: /mapcache-template.xml deploy: labels: - "traefik.http.middlewares.cache-stripprefix.stripprefix.prefixes=/cache" - # router for basic auth based access (https) - - "traefik.http.routers.dem-cache.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`, `dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/cache`)" - - "traefik.http.routers.dem-cache.middlewares=auth@file,cache-stripprefix,compress@file,cors@file" - - "traefik.http.routers.dem-cache.tls=true" - - "traefik.http.routers.dem-cache.tls.certresolver=default" - - "traefik.http.routers.dem-cache.entrypoints=https" - # router for basic auth based access (http) - - "traefik.http.routers.dem-cache-redirect.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`, `dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/cache`)" - - "traefik.http.routers.dem-cache-redirect.middlewares=redirect@file" - - "traefik.http.routers.dem-cache-redirect.entrypoints=http" + # router for shib auth based access (https) + - "traefik.http.routers.dem-cache-shib.rule=Host(`dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/cache`)" + - 
"traefik.http.routers.dem-cache-shib.middlewares=shibAuthCache@file,cache-stripprefix,compress@file,cors@file" + - "traefik.http.routers.dem-cache-shib.tls=true" + - "traefik.http.routers.dem-cache-shib.tls.certresolver=default" + - "traefik.http.routers.dem-cache-shib.entrypoints=https" + # router for shib auth based access (http) + - "traefik.http.routers.dem-cache-redirect-shib.rule=Host(`dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/cache`)" + - "traefik.http.routers.dem-cache-redirect-shib.middlewares=redirect@file" + - "traefik.http.routers.dem-cache-redirect-shib.entrypoints=http" # router for referrer based access (https) - "traefik.http.routers.dem-cache_referer.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`, `dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/cache`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|dem.pdas.prism.eox.at|dem.pass.copernicus.eu)/?`)" - "traefik.http.routers.dem-cache_referer.middlewares=cache-stripprefix,compress@file,cors@file" @@ -74,6 +87,16 @@ services: - "traefik.http.routers.dem-cache_referer-redirect.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`, `dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/cache`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|dem.pdas.prism.eox.at|dem.pass.copernicus.eu)/?`)" - "traefik.http.routers.dem-cache_referer-redirect.middlewares=redirect@file" - "traefik.http.routers.dem-cache_referer-redirect.entrypoints=http" + # router for basic auth based access (https) + - "traefik.http.routers.dem-cache.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`) && PathPrefix(`/cache`)" + - "traefik.http.routers.dem-cache.middlewares=auth@file,cache-stripprefix,compress@file,cors@file" + - "traefik.http.routers.dem-cache.tls=true" + - "traefik.http.routers.dem-cache.tls.certresolver=default" + - "traefik.http.routers.dem-cache.entrypoints=https" + # router for basic auth based access (http) + - 
"traefik.http.routers.dem-cache-redirect.rule=Host(`dem.pdas.prism.eox.at`, `a.dem.pdas.prism.eox.at`, `b.dem.pdas.prism.eox.at`, `c.dem.pdas.prism.eox.at`, `d.dem.pdas.prism.eox.at`, `e.dem.pdas.prism.eox.at`, `f.dem.pdas.prism.eox.at`, `g.dem.pdas.prism.eox.at`, `h.dem.pdas.prism.eox.at`) && PathPrefix(`/cache`)" + - "traefik.http.routers.dem-cache-redirect.middlewares=redirect@file" + - "traefik.http.routers.dem-cache-redirect.entrypoints=http" # general - "traefik.http.services.dem-cache.loadbalancer.sticky=false" - "traefik.http.services.dem-cache.loadbalancer.server.port=80" @@ -90,6 +113,7 @@ services: networks: - extnet registrar: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:release-1.0.0-rc.2 # bumpversion environment: INSTALL_DIR: "/var/www/pvs/ops/" INSTANCE_DIR: "/var/www/pvs/ops/pvs_instance/" @@ -99,19 +123,30 @@ services: constraints: - node.labels.type == internal client: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_client:release-1.0.0-rc.2 # bumpversion configs: - source: client-ops target: /usr/share/nginx/html/index.html deploy: labels: + # router for shib auth based access (https) + - "traefik.http.routers.dem-client-shib.rule=Host(`dem.pass.copernicus.eu`)" + - "traefik.http.routers.dem-client-shib.middlewares=shibAuthCache@file,compress@file" + - "traefik.http.routers.dem-client-shib.tls=true" + - "traefik.http.routers.dem-client-shib.tls.certresolver=default" + - "traefik.http.routers.dem-client-shib.entrypoints=https" + # router for shib auth based access (http) + - "traefik.http.routers.dem-client-redirect-shib.rule=Host(`dem.pass.copernicus.eu`)" + - "traefik.http.routers.dem-client-redirect-shib.middlewares=redirect@file" + - "traefik.http.routers.dem-client-redirect-shib.entrypoints=http" # router for basic auth based access (https) - - "traefik.http.routers.dem-client.rule=Host(`dem.pdas.prism.eox.at`, `dem.pass.copernicus.eu`)" + - "traefik.http.routers.dem-client.rule=Host(`dem.pdas.prism.eox.at`)" - "traefik.http.routers.dem-client.middlewares=auth@file,compress@file" - "traefik.http.routers.dem-client.tls=true" - "traefik.http.routers.dem-client.tls.certresolver=default" - "traefik.http.routers.dem-client.entrypoints=https" # router for basic auth based access (http) - - "traefik.http.routers.dem-client-redirect.rule=Host(`dem.pdas.prism.eox.at`, `dem.pass.copernicus.eu`)" + - "traefik.http.routers.dem-client-redirect.rule=Host(`dem.pdas.prism.eox.at`)" - "traefik.http.routers.dem-client-redirect.middlewares=redirect@file" - "traefik.http.routers.dem-client-redirect.entrypoints=http" # general @@ -126,6 +161,7 @@ services: networks: - extnet preprocessor: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor:release-1.0.0-rc.2 # bumpversion volumes: - type: bind source: /var/vhr @@ -135,7 +171,89 @@ services: placement: constraints: - node.labels.type == internal + shibauth: + image: unicon/shibboleth-sp:3.0.4 + environment: + APACHE_SERVERNAME: "https://dem.pass.copernicus.eu:443" + secrets: + - source: DEM_SHIB_CERT + target: SHIB_CERT + - source: DEM_SHIB_KEY + target: SHIB_KEY + - BASIC_AUTH_USERS_AUTH + deploy: + replicas: 1 + placement: + constraints: [node.role == manager] + labels: + # router for basic auth based access (https) + - "traefik.http.routers.dem-shibauth.rule=Host(`dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && 
PathPrefix(`/secure`, `/secure-cache`, `/Shibboleth.sso`)" + - "traefik.http.routers.dem-shibauth.middlewares=compress@file,cors@file" + - "traefik.http.routers.dem-shibauth.tls=true" + - "traefik.http.routers.dem-shibauth.tls.certresolver=default" + - "traefik.http.routers.dem-shibauth.entrypoints=https" + # router for basic auth based access (http) + - "traefik.http.routers.dem-shibauth-redirect.rule=Host(`dem.pass.copernicus.eu`, `a.dem.pass.copernicus.eu`, `b.dem.pass.copernicus.eu`, `c.dem.pass.copernicus.eu`, `d.dem.pass.copernicus.eu`, `e.dem.pass.copernicus.eu`, `f.dem.pass.copernicus.eu`, `g.dem.pass.copernicus.eu`, `h.dem.pass.copernicus.eu`) && PathPrefix(`/secure`, `/secure-cache`, `/Shibboleth.sso`)" + - "traefik.http.routers.dem-shibauth-redirect.middlewares=redirect@file" + - "traefik.http.routers.dem-shibauth-redirect.entrypoints=http" + # general + - "traefik.http.services.dem-shibauth.loadbalancer.sticky=false" + - "traefik.http.services.dem-shibauth.loadbalancer.server.port=80" + - "traefik.docker.network=dem-extnet" + - "traefik.docker.lbswarm=true" + - "traefik.enable=true" + networks: + - extnet + configs: + - source: shib-access-control-conf + target: /etc/shibboleth/pass-ac.xml + - source: shib-access-control-conf-cache + target: /etc/shibboleth/pass-ac-cache.xml + - source: shib-shibboleth2 + target: /etc/shibboleth/shibboleth2.xml + - source: shib-apache + target: /etc/httpd/conf.d/shib.conf + - source: shib-attribute-map + target: /etc/shibboleth/attribute-map.xml + - source: idp-metadata + target: /etc/shibboleth/idp-metadata.xml + - source: shib-index + target: /var/www/html/secure/index.html + - source: shib-index + target: /var/www/html/secure-cache/index.html + - source: shibd-logger + target: /etc/shibboleth/shibd.logger + - source: native-logger + target: /etc/shibboleth/native.logger + ingestor: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor:release-1.0.0-rc.2 # bumpversion networks: extnet: name: dem-extnet external: true +configs: + shib-access-control-conf: + file: ./config/shibboleth/dem-ac.xml + shib-access-control-conf-cache: + file: ./config/shibboleth/dem-ac-cache.xml + shib-shibboleth2: + file: ./config/shibboleth/dem-shibboleth2.xml + shib-apache: + file: ./config/shibboleth/shib-apache.conf + shib-attribute-map: + file: ./config/shibboleth/attribute-map.xml + shib-index: + file: ./config/shibboleth/index.html + native-logger: + file: ./config/shibboleth/native.logger + shibd-logger: + file: ./config/shibboleth/shibd.logger + idp-metadata: + external: true +secrets: + DEM_SHIB_CERT: + external: true + DEM_SHIB_KEY: + external: true + BASIC_AUTH_USERS_AUTH: + external: true diff --git a/docker-compose.dem.yml b/docker-compose.dem.yml index 598bfbfc0063957c1a688bf51592224f10d2a29d..fc4b355fc5b11624504962ee82d429d8559ce467 100644 --- a/docker-compose.dem.yml +++ b/docker-compose.dem.yml @@ -122,7 +122,7 @@ services: OS_PASSWORD_DOWNLOAD_FILE: "/run/secrets/OS_PASSWORD_DOWNLOAD" configs: - source: preprocessor-config - target: /config.yaml + target: /config.yaml deploy: replicas: 1 networks: @@ -160,6 +160,8 @@ services: configs: - source: init-db target: /init-db.sh + - source: registrar-config + target: /config.yaml deploy: replicas: 1 networks: @@ -182,9 +184,8 @@ services: configs: - source: sftp_users_dem target: /etc/sftp/users.conf - ports: - - "2222:22" + - "2224:22" deploy: replicas: 1 ingestor: @@ -212,6 +213,8 @@ configs: file: ./config/dem_index-ops.html preprocessor-config: file: ./config/dem_preprocessor-config.yml + 
registrar-config: + file: ./config/dem_registrar-config.yml volumes: db-data: redis-data: @@ -227,4 +230,3 @@ secrets: external: true DJANGO_PASSWORD: external: true - \ No newline at end of file diff --git a/docker-compose.emg.ops.yml b/docker-compose.emg.ops.yml index 7517f1aeb239483400662957a69a7708ccd37ab4..4f4d5cd0645dc5766f412f705d9ebc36b72cada6 100644 --- a/docker-compose.emg.ops.yml +++ b/docker-compose.emg.ops.yml @@ -1,4 +1,5 @@ version: "3.6" +x-vs-version: :release-1.0.0-rc.2 # bumpversion services: database: volumes: @@ -7,21 +8,22 @@ services: tmpfs: size: 536870912 renderer: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:release-1.0.0-rc.2 # bumpversion environment: INSTALL_DIR: "/var/www/pvs/ops/" INSTANCE_DIR: "/var/www/pvs/ops/pvs_instance/" deploy: labels: - # router for basic auth based access (https) - - "traefik.http.routers.emg-renderer.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`, `emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" - - "traefik.http.routers.emg-renderer.middlewares=auth@file,compress@file,cors@file" - - "traefik.http.routers.emg-renderer.tls=true" - - "traefik.http.routers.emg-renderer.tls.certresolver=default" - - "traefik.http.routers.emg-renderer.entrypoints=https" - # router for basic auth based access (http) - - "traefik.http.routers.emg-renderer-redirect.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`, `emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" - - "traefik.http.routers.emg-renderer-redirect.middlewares=redirect@file" - - "traefik.http.routers.emg-renderer-redirect.entrypoints=http" + # router for shib auth based access (https) + - "traefik.http.routers.emg-renderer-shib.rule=Host(`emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.emg-renderer-shib.middlewares=shibAuth@file,compress@file,cors@file" + - "traefik.http.routers.emg-renderer-shib.tls=true" + - "traefik.http.routers.emg-renderer-shib.tls.certresolver=default" + - "traefik.http.routers.emg-renderer-shib.entrypoints=https" + # router for shib auth based access (http) + - "traefik.http.routers.emg-renderer-redirect-shib.rule=Host(`emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, 
`/admin`)" + - "traefik.http.routers.emg-renderer-redirect-shib.middlewares=redirect@file" + - "traefik.http.routers.emg-renderer-redirect-shib.entrypoints=http" # router for referrer based access (https) - "traefik.http.routers.emg-renderer_referer.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`, `emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|emg.pdas.prism.eox.at|emg.pass.copernicus.eu)/?`)" - "traefik.http.routers.emg-renderer_referer.middlewares=compress@file,cors@file" @@ -32,6 +34,16 @@ services: - "traefik.http.routers.emg-renderer_referer-redirect.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`, `emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|emg.pdas.prism.eox.at|emg.pass.copernicus.eu)/?`)" - "traefik.http.routers.emg-renderer_referer-redirect.middlewares=redirect@file" - "traefik.http.routers.emg-renderer_referer-redirect.entrypoints=http" + # router for basic auth based access (https) + - "traefik.http.routers.emg-renderer.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.emg-renderer.middlewares=auth@file,compress@file,cors@file" + - "traefik.http.routers.emg-renderer.tls=true" + - "traefik.http.routers.emg-renderer.tls.certresolver=default" + - "traefik.http.routers.emg-renderer.entrypoints=https" + # router for basic auth based access (http) + - "traefik.http.routers.emg-renderer-redirect.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.emg-renderer-redirect.middlewares=redirect@file" + - "traefik.http.routers.emg-renderer-redirect.entrypoints=http" # general - "traefik.http.services.emg-renderer.loadbalancer.sticky=false" - 
"traefik.http.services.emg-renderer.loadbalancer.server.port=80" @@ -48,22 +60,23 @@ services: networks: - extnet cache: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_cache:release-1.0.0-rc.2 # bumpversion configs: - source: mapcache-ops target: /mapcache-template.xml deploy: labels: - "traefik.http.middlewares.cache-stripprefix.stripprefix.prefixes=/cache" - # router for basic auth based access (https) - - "traefik.http.routers.emg-cache.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`, `emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/cache`)" - - "traefik.http.routers.emg-cache.middlewares=auth@file,cache-stripprefix,compress@file,cors@file" - - "traefik.http.routers.emg-cache.tls=true" - - "traefik.http.routers.emg-cache.tls.certresolver=default" - - "traefik.http.routers.emg-cache.entrypoints=https" - # router for basic auth based access (http) - - "traefik.http.routers.emg-cache-redirect.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`, `emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/cache`)" - - "traefik.http.routers.emg-cache-redirect.middlewares=redirect@file" - - "traefik.http.routers.emg-cache-redirect.entrypoints=http" + # router for shib auth based access (https) + - "traefik.http.routers.emg-cache-shib.rule=Host(`emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/cache`)" + - "traefik.http.routers.emg-cache-shib.middlewares=shibAuthCache@file,cache-stripprefix,compress@file,cors@file" + - "traefik.http.routers.emg-cache-shib.tls=true" + - "traefik.http.routers.emg-cache-shib.tls.certresolver=default" + - "traefik.http.routers.emg-cache-shib.entrypoints=https" + # router for shib auth based access (http) + - "traefik.http.routers.emg-cache-redirect-shib.rule=Host(`emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/cache`)" + - "traefik.http.routers.emg-cache-redirect-shib.middlewares=redirect@file" + - "traefik.http.routers.emg-cache-redirect-shib.entrypoints=http" # router for referrer based access (https) - "traefik.http.routers.emg-cache_referer.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`, `emg.pass.copernicus.eu`, 
`a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/cache`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|emg.pdas.prism.eox.at|emg.pass.copernicus.eu)/?`)" - "traefik.http.routers.emg-cache_referer.middlewares=cache-stripprefix,compress@file,cors@file" @@ -74,6 +87,16 @@ services: - "traefik.http.routers.emg-cache_referer-redirect.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`, `emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/cache`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|emg.pdas.prism.eox.at|emg.pass.copernicus.eu)/?`)" - "traefik.http.routers.emg-cache_referer-redirect.middlewares=redirect@file" - "traefik.http.routers.emg-cache_referer-redirect.entrypoints=http" + # router for basic auth based access (https) + - "traefik.http.routers.emg-cache.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`) && PathPrefix(`/cache`)" + - "traefik.http.routers.emg-cache.middlewares=auth@file,cache-stripprefix,compress@file,cors@file" + - "traefik.http.routers.emg-cache.tls=true" + - "traefik.http.routers.emg-cache.tls.certresolver=default" + - "traefik.http.routers.emg-cache.entrypoints=https" + # router for basic auth based access (http) + - "traefik.http.routers.emg-cache-redirect.rule=Host(`emg.pdas.prism.eox.at`, `a.emg.pdas.prism.eox.at`, `b.emg.pdas.prism.eox.at`, `c.emg.pdas.prism.eox.at`, `d.emg.pdas.prism.eox.at`, `e.emg.pdas.prism.eox.at`, `f.emg.pdas.prism.eox.at`, `g.emg.pdas.prism.eox.at`, `h.emg.pdas.prism.eox.at`) && PathPrefix(`/cache`)" + - "traefik.http.routers.emg-cache-redirect.middlewares=redirect@file" + - "traefik.http.routers.emg-cache-redirect.entrypoints=http" # general - "traefik.http.services.emg-cache.loadbalancer.sticky=false" - "traefik.http.services.emg-cache.loadbalancer.server.port=80" @@ -90,6 +113,7 @@ services: networks: - extnet registrar: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:release-1.0.0-rc.2 # bumpversion environment: INSTALL_DIR: "/var/www/pvs/ops/" INSTANCE_DIR: "/var/www/pvs/ops/pvs_instance/" @@ -99,19 +123,30 @@ services: constraints: - node.labels.type == internal client: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_client:release-1.0.0-rc.2 # bumpversion configs: - source: client-ops target: /usr/share/nginx/html/index.html deploy: labels: + # router for shib auth based access (https) + - 
"traefik.http.routers.emg-client-shib.rule=Host(`emg.pass.copernicus.eu`)" + - "traefik.http.routers.emg-client-shib.middlewares=shibAuthCache@file,compress@file" + - "traefik.http.routers.emg-client-shib.tls=true" + - "traefik.http.routers.emg-client-shib.tls.certresolver=default" + - "traefik.http.routers.emg-client-shib.entrypoints=https" + # router for shib auth based access (http) + - "traefik.http.routers.emg-client-redirect-shib.rule=Host(`emg.pass.copernicus.eu`)" + - "traefik.http.routers.emg-client-redirect-shib.middlewares=redirect@file" + - "traefik.http.routers.emg-client-redirect-shib.entrypoints=http" # router for basic auth based access (https) - - "traefik.http.routers.emg-client.rule=Host(`emg.pdas.prism.eox.at`, `emg.pass.copernicus.eu`)" + - "traefik.http.routers.emg-client.rule=Host(`emg.pdas.prism.eox.at`)" - "traefik.http.routers.emg-client.middlewares=auth@file,compress@file" - "traefik.http.routers.emg-client.tls=true" - "traefik.http.routers.emg-client.tls.certresolver=default" - "traefik.http.routers.emg-client.entrypoints=https" # router for basic auth based access (http) - - "traefik.http.routers.emg-client-redirect.rule=Host(`emg.pdas.prism.eox.at`, `emg.pass.copernicus.eu`)" + - "traefik.http.routers.emg-client-redirect.rule=Host(`emg.pdas.prism.eox.at`)" - "traefik.http.routers.emg-client-redirect.middlewares=redirect@file" - "traefik.http.routers.emg-client-redirect.entrypoints=http" # general @@ -126,6 +161,7 @@ services: networks: - extnet preprocessor: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor:release-1.0.0-rc.2 # bumpversion volumes: - type: bind source: /var/vhr @@ -135,7 +171,89 @@ services: placement: constraints: - node.labels.type == internal + shibauth: + image: unicon/shibboleth-sp:3.0.4 + environment: + APACHE_SERVERNAME: "https://emg.pass.copernicus.eu:443" + secrets: + - source: EMG_SHIB_CERT + target: SHIB_CERT + - source: EMG_SHIB_KEY + target: SHIB_KEY + - BASIC_AUTH_USERS_AUTH + deploy: + replicas: 1 + placement: + constraints: [node.role == manager] + labels: + # router for basic auth based access (https) + - "traefik.http.routers.emg-shibauth.rule=Host(`emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/secure`, `/secure-cache`, `/Shibboleth.sso`)" + - "traefik.http.routers.emg-shibauth.middlewares=compress@file,cors@file" + - "traefik.http.routers.emg-shibauth.tls=true" + - "traefik.http.routers.emg-shibauth.tls.certresolver=default" + - "traefik.http.routers.emg-shibauth.entrypoints=https" + # router for basic auth based access (http) + - "traefik.http.routers.emg-shibauth-redirect.rule=Host(`emg.pass.copernicus.eu`, `a.emg.pass.copernicus.eu`, `b.emg.pass.copernicus.eu`, `c.emg.pass.copernicus.eu`, `d.emg.pass.copernicus.eu`, `e.emg.pass.copernicus.eu`, `f.emg.pass.copernicus.eu`, `g.emg.pass.copernicus.eu`, `h.emg.pass.copernicus.eu`) && PathPrefix(`/secure`, `/secure-cache`, `/Shibboleth.sso`)" + - "traefik.http.routers.emg-shibauth-redirect.middlewares=redirect@file" + - "traefik.http.routers.emg-shibauth-redirect.entrypoints=http" + # general + - "traefik.http.services.emg-shibauth.loadbalancer.sticky=false" + - "traefik.http.services.emg-shibauth.loadbalancer.server.port=80" + - "traefik.docker.network=emg-extnet" + - "traefik.docker.lbswarm=true" + - "traefik.enable=true" + networks: + - extnet + 
configs: + - source: shib-access-control-conf + target: /etc/shibboleth/pass-ac.xml + - source: shib-access-control-conf-cache + target: /etc/shibboleth/pass-ac-cache.xml + - source: shib-shibboleth2 + target: /etc/shibboleth/shibboleth2.xml + - source: shib-apache + target: /etc/httpd/conf.d/shib.conf + - source: shib-attribute-map + target: /etc/shibboleth/attribute-map.xml + - source: idp-metadata + target: /etc/shibboleth/idp-metadata.xml + - source: shib-index + target: /var/www/html/secure/index.html + - source: shib-index + target: /var/www/html/secure-cache/index.html + - source: shibd-logger + target: /etc/shibboleth/shibd.logger + - source: native-logger + target: /etc/shibboleth/native.logger + ingestor: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor:release-1.0.0-rc.2 # bumpversion networks: extnet: name: emg-extnet external: true +configs: + shib-access-control-conf: + file: ./config/shibboleth/emg-ac.xml + shib-access-control-conf-cache: + file: ./config/shibboleth/emg-ac-cache.xml + shib-shibboleth2: + file: ./config/shibboleth/emg-shibboleth2.xml + shib-apache: + file: ./config/shibboleth/shib-apache.conf + shib-attribute-map: + file: ./config/shibboleth/attribute-map.xml + shib-index: + file: ./config/shibboleth/index.html + native-logger: + file: ./config/shibboleth/native.logger + shibd-logger: + file: ./config/shibboleth/shibd.logger + idp-metadata: + external: true +secrets: + EMG_SHIB_CERT: + external: true + EMG_SHIB_KEY: + external: true + BASIC_AUTH_USERS_AUTH: + external: true diff --git a/docker-compose.emg.yml b/docker-compose.emg.yml index 107b33a93be54cf8dafe04c36af94b6f82f3a2f8..06da63290791105ef1a8444bf999f7a3225fa6c8 100644 --- a/docker-compose.emg.yml +++ b/docker-compose.emg.yml @@ -132,7 +132,7 @@ services: OS_PASSWORD_DOWNLOAD_FILE: "/run/secrets/OS_PASSWORD_DOWNLOAD" configs: - source: preprocessor-config - target: /config.yaml + target: /config.yaml deploy: replicas: 1 networks: @@ -170,6 +170,8 @@ services: configs: - source: init-db target: /init-db.sh + - source: registrar-config + target: /config.yaml deploy: replicas: 1 networks: @@ -194,7 +196,7 @@ services: target: /etc/sftp/users.conf ports: - - "2222:22" + - "2223:22" deploy: replicas: 1 configs: @@ -212,6 +214,8 @@ configs: file: ./config/emg_index-ops.html preprocessor-config: file: ./config/emg_preprocessor-config.yml + registrar-config: + file: ./config/emg_registrar-config.yml volumes: db-data: redis-data: diff --git a/docker-compose.logging.dev.yml b/docker-compose.logging.dev.yml index d749cb97edd584b85c8bbe46b708d06e2653ee1f..38af28cf99c70815163231a8c13e91e5798c8e4b 100644 --- a/docker-compose.logging.dev.yml +++ b/docker-compose.logging.dev.yml @@ -1,5 +1,7 @@ version: "3.6" services: + fluentd: + image: registry.gitlab.eox.at/esa/prism/vs/fluentd:dev elasticsearch: ports: - "9200:9200" diff --git a/docker-compose.logging.ops.yml b/docker-compose.logging.ops.yml index 53434247c97cc5ba62880d1ca8eafc2a81731d0c..2a6437f7bb7efb509dc0558bb113a1e80966785d 100644 --- a/docker-compose.logging.ops.yml +++ b/docker-compose.logging.ops.yml @@ -1,11 +1,12 @@ version: "3.6" +x-vs-version: :release-1.0.0-rc.2 # bumpversion services: fluentd: + image: registry.gitlab.eox.at/esa/prism/vs/fluentd:release-1.0.0-rc.2 # bumpversion deploy: placement: # this is not strictly required, but feels right constraints: [node.role == manager] - elasticsearch: environment: - bootstrap.memory_lock=true diff --git a/docker-compose.vhr18.ops.yml b/docker-compose.vhr18.ops.yml index 
fef2a0d2f987e35fc781a29fb376f2c78584bcd9..c0baef92e4d72f0a9d52a8ae7bb2a7544eb9e581 100644 --- a/docker-compose.vhr18.ops.yml +++ b/docker-compose.vhr18.ops.yml @@ -1,4 +1,5 @@ version: "3.6" +x-vs-version: :release-1.0.0-rc.2 # bumpversion services: database: volumes: @@ -7,22 +8,23 @@ services: tmpfs: size: 536870912 renderer: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:release-1.0.0-rc.2 # bumpversion environment: INSTALL_DIR: "/var/www/pvs/ops/" INSTANCE_DIR: "/var/www/pvs/ops/pvs_instance/" deploy: replicas: 3 labels: - # router for basic auth based access (https) - - "traefik.http.routers.vhr18-renderer.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`, `vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" - - "traefik.http.routers.vhr18-renderer.middlewares=auth@file,compress@file,cors@file" - - "traefik.http.routers.vhr18-renderer.tls=true" - - "traefik.http.routers.vhr18-renderer.tls.certresolver=default" - - "traefik.http.routers.vhr18-renderer.entrypoints=https" - # router for basic auth based access (http) - - "traefik.http.routers.vhr18-renderer-redirect.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`, `vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" - - "traefik.http.routers.vhr18-renderer-redirect.middlewares=redirect@file" - - "traefik.http.routers.vhr18-renderer-redirect.entrypoints=http" + # router for shib auth based access (https) + - "traefik.http.routers.vhr18-renderer-shib.rule=Host(`vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.vhr18-renderer-shib.middlewares=shibAuth@file,compress@file,cors@file" + - "traefik.http.routers.vhr18-renderer-shib.tls=true" + - "traefik.http.routers.vhr18-renderer-shib.tls.certresolver=default" + - "traefik.http.routers.vhr18-renderer-shib.entrypoints=https" + # router for shib auth based access (http) + - "traefik.http.routers.vhr18-renderer-redirect-shib.rule=Host(`vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.vhr18-renderer-redirect-shib.middlewares=redirect@file" + - 
"traefik.http.routers.vhr18-renderer-redirect-shib.entrypoints=http" # router for referrer based access (https) - "traefik.http.routers.vhr18-renderer_referer.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`, `vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|vhr18.pdas.prism.eox.at|vhr18.pass.copernicus.eu)/?`)" - "traefik.http.routers.vhr18-renderer_referer.middlewares=compress@file,cors@file" @@ -33,6 +35,16 @@ services: - "traefik.http.routers.vhr18-renderer_referer-redirect.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`, `vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|vhr18.pdas.prism.eox.at|vhr18.pass.copernicus.eu)/?`)" - "traefik.http.routers.vhr18-renderer_referer-redirect.middlewares=redirect@file" - "traefik.http.routers.vhr18-renderer_referer-redirect.entrypoints=http" + # router for basic auth based access (https) + - "traefik.http.routers.vhr18-renderer.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.vhr18-renderer.middlewares=auth@file,compress@file,cors@file" + - "traefik.http.routers.vhr18-renderer.tls=true" + - "traefik.http.routers.vhr18-renderer.tls.certresolver=default" + - "traefik.http.routers.vhr18-renderer.entrypoints=https" + # router for basic auth based access (http) + - "traefik.http.routers.vhr18-renderer-redirect.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.vhr18-renderer-redirect.middlewares=redirect@file" + - "traefik.http.routers.vhr18-renderer-redirect.entrypoints=http" # general - "traefik.http.services.vhr18-renderer.loadbalancer.sticky=false" - 
"traefik.http.services.vhr18-renderer.loadbalancer.server.port=80" @@ -48,22 +60,23 @@ services: networks: - extnet cache: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_cache:release-1.0.0-rc.2 # bumpversion configs: - source: mapcache-ops target: /mapcache-template.xml deploy: labels: - "traefik.http.middlewares.cache-stripprefix.stripprefix.prefixes=/cache" - # router for basic auth based access (https) - - "traefik.http.routers.vhr18-cache.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`, `vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/cache`)" - - "traefik.http.routers.vhr18-cache.middlewares=auth@file,cache-stripprefix,compress@file,cors@file" - - "traefik.http.routers.vhr18-cache.tls=true" - - "traefik.http.routers.vhr18-cache.tls.certresolver=default" - - "traefik.http.routers.vhr18-cache.entrypoints=https" - # router for basic auth based access (http) - - "traefik.http.routers.vhr18-cache-redirect.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`, `vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/cache`)" - - "traefik.http.routers.vhr18-cache-redirect.middlewares=redirect@file" - - "traefik.http.routers.vhr18-cache-redirect.entrypoints=http" + # router for shib auth based access (https) + - "traefik.http.routers.vhr18-cache-shib.rule=Host(`vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/cache`)" + - "traefik.http.routers.vhr18-cache-shib.middlewares=shibAuthCache@file,cache-stripprefix,compress@file,cors@file" + - "traefik.http.routers.vhr18-cache-shib.tls=true" + - "traefik.http.routers.vhr18-cache-shib.tls.certresolver=default" + - "traefik.http.routers.vhr18-cache-shib.entrypoints=https" + # router for shib auth based access (http) + - "traefik.http.routers.vhr18-cache-redirect-shib.rule=Host(`vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/cache`)" + - "traefik.http.routers.vhr18-cache-redirect-shib.middlewares=redirect@file" + - "traefik.http.routers.vhr18-cache-redirect-shib.entrypoints=http" # router for referrer based access (https) - "traefik.http.routers.vhr18-cache_referer.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, 
`d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`, `vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/cache`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|vhr18.pdas.prism.eox.at|vhr18.pass.copernicus.eu)/?`)" - "traefik.http.routers.vhr18-cache_referer.middlewares=cache-stripprefix,compress@file,cors@file" @@ -74,6 +87,16 @@ services: - "traefik.http.routers.vhr18-cache_referer-redirect.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`, `vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/cache`) && HeadersRegexp(`Referer`, `(https?://)?(panda.copernicus.eu|panda.cdsv3.eu|panda-demo.ondaprism.eu|panda-demo.copernicus.eu|cdsportal-demo.copernicus.eu|ocqc-demo.copernicus.eu|spdm-intservices.cds.esa.int|spdm-intservices-adm.cds.esa.int|vhr18.pdas.prism.eox.at|vhr18.pass.copernicus.eu)/?`)" - "traefik.http.routers.vhr18-cache_referer-redirect.middlewares=redirect@file" - "traefik.http.routers.vhr18-cache_referer-redirect.entrypoints=http" + # router for basic auth based access (https) + - "traefik.http.routers.vhr18-cache.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`) && PathPrefix(`/cache`)" + - "traefik.http.routers.vhr18-cache.middlewares=auth@file,cache-stripprefix,compress@file,cors@file" + - "traefik.http.routers.vhr18-cache.tls=true" + - "traefik.http.routers.vhr18-cache.tls.certresolver=default" + - "traefik.http.routers.vhr18-cache.entrypoints=https" + # router for basic auth based access (http) + - "traefik.http.routers.vhr18-cache-redirect.rule=Host(`vhr18.pdas.prism.eox.at`, `a.vhr18.pdas.prism.eox.at`, `b.vhr18.pdas.prism.eox.at`, `c.vhr18.pdas.prism.eox.at`, `d.vhr18.pdas.prism.eox.at`, `e.vhr18.pdas.prism.eox.at`, `f.vhr18.pdas.prism.eox.at`, `g.vhr18.pdas.prism.eox.at`, `h.vhr18.pdas.prism.eox.at`) && PathPrefix(`/cache`)" + - "traefik.http.routers.vhr18-cache-redirect.middlewares=redirect@file" + - "traefik.http.routers.vhr18-cache-redirect.entrypoints=http" # general - "traefik.http.services.vhr18-cache.loadbalancer.sticky=false" - "traefik.http.services.vhr18-cache.loadbalancer.server.port=80" @@ -90,6 +113,7 @@ services: networks: - extnet registrar: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_core:release-1.0.0-rc.2 # bumpversion environment: INSTALL_DIR: "/var/www/pvs/ops/" INSTANCE_DIR: "/var/www/pvs/ops/pvs_instance/" @@ -97,21 +121,32 @@ services: replicas: 0 placement: constraints: - - 
node.labels.type == internal + - node.labels.type == internal client: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_client:release-1.0.0-rc.2 # bumpversion configs: - source: client-ops target: /usr/share/nginx/html/index.html deploy: labels: + # router for shib auth based access (https) + - "traefik.http.routers.vhr18-client-shib.rule=Host(`vhr18.pass.copernicus.eu`)" + - "traefik.http.routers.vhr18-client-shib.middlewares=shibAuthCache@file,compress@file" + - "traefik.http.routers.vhr18-client-shib.tls=true" + - "traefik.http.routers.vhr18-client-shib.tls.certresolver=default" + - "traefik.http.routers.vhr18-client-shib.entrypoints=https" + # router for shib auth based access (http) + - "traefik.http.routers.vhr18-client-redirect-shib.rule=Host(`vhr18.pass.copernicus.eu`)" + - "traefik.http.routers.vhr18-client-redirect-shib.middlewares=redirect@file" + - "traefik.http.routers.vhr18-client-redirect-shib.entrypoints=http" # router for basic auth based access (https) - - "traefik.http.routers.vhr18-client.rule=Host(`vhr18.pdas.prism.eox.at`, `vhr18.pass.copernicus.eu`)" - - "traefik.http.routers.vhr18-client.middlewares=auth@file,compress@file" + - "traefik.http.routers.vhr18-client.rule=Host(`vhr18.pdas.prism.eox.at`)" + - "traefik.http.routers.vhr18-client.middlewares=shibAuthCache@file,compress@file" - "traefik.http.routers.vhr18-client.tls=true" - "traefik.http.routers.vhr18-client.tls.certresolver=default" - "traefik.http.routers.vhr18-client.entrypoints=https" # router for basic auth based access (http) - - "traefik.http.routers.vhr18-client-redirect.rule=Host(`vhr18.pdas.prism.eox.at`, `vhr18.pass.copernicus.eu`)" + - "traefik.http.routers.vhr18-client-redirect.rule=Host(`vhr18.pdas.prism.eox.at`)" - "traefik.http.routers.vhr18-client-redirect.middlewares=redirect@file" - "traefik.http.routers.vhr18-client-redirect.entrypoints=http" # general @@ -122,10 +157,11 @@ services: - "traefik.enable=true" placement: constraints: - - node.labels.type == external + - node.labels.type == external networks: - extnet preprocessor: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor:release-1.0.0-rc.2 # bumpversion volumes: - type: bind source: /var/vhr @@ -135,7 +171,89 @@ services: placement: constraints: - node.labels.type == internal + shibauth: + image: unicon/shibboleth-sp:3.0.4 + environment: + APACHE_SERVERNAME: "https://vhr18.pass.copernicus.eu:443" + secrets: + - source: VHR18_SHIB_CERT + target: SHIB_CERT + - source: VHR18_SHIB_KEY + target: SHIB_KEY + - BASIC_AUTH_USERS_AUTH + deploy: + replicas: 1 + placement: + constraints: [node.role == manager] + labels: + # router for basic auth based access (https) + - "traefik.http.routers.vhr18-shibauth.rule=Host(`vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, `d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/secure`, `/secure-cache`, `/Shibboleth.sso`)" + - "traefik.http.routers.vhr18-shibauth.middlewares=compress@file,cors@file" + - "traefik.http.routers.vhr18-shibauth.tls=true" + - "traefik.http.routers.vhr18-shibauth.tls.certresolver=default" + - "traefik.http.routers.vhr18-shibauth.entrypoints=https" + # router for basic auth based access (http) + - "traefik.http.routers.vhr18-shibauth-redirect.rule=Host(`vhr18.pass.copernicus.eu`, `a.vhr18.pass.copernicus.eu`, `b.vhr18.pass.copernicus.eu`, `c.vhr18.pass.copernicus.eu`, 
`d.vhr18.pass.copernicus.eu`, `e.vhr18.pass.copernicus.eu`, `f.vhr18.pass.copernicus.eu`, `g.vhr18.pass.copernicus.eu`, `h.vhr18.pass.copernicus.eu`) && PathPrefix(`/secure`, `/secure-cache`, `/Shibboleth.sso`)" + - "traefik.http.routers.vhr18-shibauth-redirect.middlewares=redirect@file" + - "traefik.http.routers.vhr18-shibauth-redirect.entrypoints=http" + # general + - "traefik.http.services.vhr18-shibauth.loadbalancer.sticky=false" + - "traefik.http.services.vhr18-shibauth.loadbalancer.server.port=80" + - "traefik.docker.network=vhr18-extnet" + - "traefik.docker.lbswarm=true" + - "traefik.enable=true" + networks: + - extnet + configs: + - source: shib-access-control-conf + target: /etc/shibboleth/pass-ac.xml + - source: shib-access-control-conf-cache + target: /etc/shibboleth/pass-ac-cache.xml + - source: shib-shibboleth2 + target: /etc/shibboleth/shibboleth2.xml + - source: shib-apache + target: /etc/httpd/conf.d/shib.conf + - source: shib-attribute-map + target: /etc/shibboleth/attribute-map.xml + - source: idp-metadata + target: /etc/shibboleth/idp-metadata.xml + - source: shib-index + target: /var/www/html/secure/index.html + - source: shib-index + target: /var/www/html/secure-cache/index.html + - source: shibd-logger + target: /etc/shibboleth/shibd.logger + - source: native-logger + target: /etc/shibboleth/native.logger + ingestor: + image: registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor:release-1.0.0-rc.2 # bumpversion networks: extnet: name: vhr18-extnet external: true +configs: + shib-access-control-conf: + file: ./config/shibboleth/vhr18-ac.xml + shib-access-control-conf-cache: + file: ./config/shibboleth/vhr18-ac-cache.xml + shib-shibboleth2: + file: ./config/shibboleth/vhr18-shibboleth2.xml + shib-apache: + file: ./config/shibboleth/shib-apache.conf + shib-attribute-map: + file: ./config/shibboleth/attribute-map.xml + native-logger: + file: ./config/shibboleth/native.logger + shibd-logger: + file: ./config/shibboleth/shibd.logger + shib-index: + file: ./config/shibboleth/index.html + idp-metadata: + external: true +secrets: + VHR18_SHIB_CERT: + external: true + VHR18_SHIB_KEY: + external: true + BASIC_AUTH_USERS_AUTH: + external: true diff --git a/docker-compose.vhr18.yml b/docker-compose.vhr18.yml index 72c3c5ddcef71d61cc21022ef0e9ffe6542c36bc..fe74cc7e622be0ba2e5534e63d2c5b5fa2429caa 100644 --- a/docker-compose.vhr18.yml +++ b/docker-compose.vhr18.yml @@ -135,7 +135,7 @@ services: OS_PASSWORD_DOWNLOAD_FILE: "/run/secrets/OS_PASSWORD_DOWNLOAD" configs: - source: preprocessor-config - target: /config.yaml + target: /config.yaml deploy: replicas: 1 networks: @@ -173,6 +173,8 @@ services: configs: - source: init-db target: /init-db.sh + - source: registrar-config + target: /config.yaml deploy: replicas: 1 networks: @@ -197,7 +199,6 @@ services: target: /etc/sftp/users.conf deploy: replicas: 1 - ports: - "2222:22" ingestor: @@ -219,6 +220,8 @@ configs: file: ./config/vhr18_index-ops.html preprocessor-config: file: ./config/vhr18_preprocessor-config.yml + registrar-config: + file: ./config/vhr18_registrar-config.yml volumes: db-data: redis-data: diff --git a/documentation/operator-guide/access.rst b/documentation/operator-guide/access.rst new file mode 100644 index 0000000000000000000000000000000000000000..9dafab5175a4373ef0963b65056062e15e2c619a --- /dev/null +++ b/documentation/operator-guide/access.rst @@ -0,0 +1,180 @@ +.. 
_access: + +Access +====== +This chapter describes the general concepts of how external access to each component is provided and how the authentication and authorization layer based on `Shibboleth SP3 `_ is configured. + +General overview +~~~~~~~~~~~~~~~~ +Each individual docker **stack** has its own internal network ``intnet`` where services can communicate with each other. This network is not exposed to the public and provides most of the necessary communication. Additionally, external user access to some services (client, renderer, cache) is provided via the external network ``extnet`` and the reverse-proxy (traefik) with a load balancer. + +These services can have a set of authentication and authorization rules applied both on the traefik level and on the Shibboleth SP level. Kibana and the Traefik dashboard are also accessible externally, but through a different set of default credentials. + + +Routing with traefik +~~~~~~~~~~~~~~~~~~~~ +The ``reverse-proxy`` service in the base stack provides the central access endpoint to the VS. It exposes ports 80 and 443 for HTTP and HTTPS access. Configuration of the reverse-proxy is done in three places. + +The first two are the static and dynamic configuration files ``traefik.yml`` and ``traefik-dynamic.yml``. The static configuration sets up connections to providers and defines the entrypoints that Traefik will listen to. The dynamic configuration defines how the requests are handled. This configuration can change and is seamlessly hot-reloaded, without any request interruption or connection loss. The third part consists of the docker ``labels`` on individual services to which Traefik provides access. + +For example, the following configuration snippet enables access to certain paths of the ``renderer`` service under a given hostname. It also applies externally defined basic authentication and other rules via the ``@file`` identifier, which references configurations from ``traefik-dynamic.yml``. + +.. code-block:: yaml + + renderer: + deploy: + labels: + # router for basic auth based access (https) + - "traefik.http.routers.vhr18-renderer.rule=Host(`vhr18.pdas.prism.eox.at`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.vhr18-renderer.middlewares=auth@file,compress@file,cors@file" + - "traefik.http.routers.vhr18-renderer.tls=true" + - "traefik.http.routers.vhr18-renderer.tls.certresolver=default" + - "traefik.http.routers.vhr18-renderer.entrypoints=https" + # general rules + - "traefik.http.services.vhr18-renderer.loadbalancer.sticky=false" + - "traefik.http.services.vhr18-renderer.loadbalancer.server.port=80" + - "traefik.docker.network=vhr18-extnet" + - "traefik.docker.lbswarm=true" + - "traefik.enable=true" + +An example of such an auth@file configuration from ``traefik-dynamic.yml`` would be the following snippet, where ``BASIC_AUTH_USERS_AUTH`` references a docker secret configured earlier: + +.. code-block:: yaml + + http: + middlewares: + auth: + basicAuth: + realm: "PRISM View Server (PVS)" + usersFile: "/run/secrets/BASIC_AUTH_USERS_AUTH" + +Unsecured HTTP access is configured to be redirected to the HTTPS endpoint; inside the swarm, only HTTP is used among the services. + +Authentication and Authorization +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Authentication of access to the external parts of the VS is provided by two options: + +- Traefik-provided basic authentication - configured as ``auth@file`` and ``apiAuth@file`` + +Here, access to such an endpoint requires basic authentication credentials (username, password) to be provided; web browsers usually prompt the user for input.
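+For example, a request with such credentials could look like the following (the username and password are purely illustrative; actual credentials are taken from the users file behind the ``BASIC_AUTH_USERS_AUTH`` secret):
+
+.. code-block:: bash
+
+   # hypothetical credentials, replace with an entry from the basic auth users file
+   curl -u "user1:password" "https://emg.pdas.prism.eox.at/ows?service=WMS&request=GetCapabilities"
+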
After inserting valid credentials, access is granted. + +- Shibboleth Service Provider 3 + Apache 2 instance, to which requests are forwarded by the `Traefik ForwardAuth middleware `_. + +The middleware delegates the authentication to Shibboleth. If the Shibboleth response code is 2XX, access is granted and the original request is performed. Otherwise, the response from Shibboleth is returned. + +In order to authenticate with Shibboleth, a user must log in with valid credentials on the side of the Identity Provider (IdP). If this succeeds, the IdP informs the SP about the successful login, accompanied by the relevant user attributes, and a session is created for the user. The SP then saves the information about the created session into a cookie and, based on the user attributes, can authorize access to the services. If the user was already logged in at the IdP, they are automatically authenticated without being prompted for credentials again. + +With the current approach it is possible to set individual authorization rules on a ``Collection`` and ``Service`` level. It is not yet possible to cleanly separate viewing and download, as both of these parts are handled by the ``renderer`` service. + +Configuration +~~~~~~~~~~~~~ +For a correct configuration of Shibboleth SP3 on a new stack, several steps need to be performed. Most of these configurations are usually done in the :ref:`initialization` step using the ``pvs_starter`` tool. Still, it is advised to check the following steps, understand them and change them if necessary. +Briefly summarized, the SP and IdP need to exchange metadata and certificates to trust each other, the SP needs to know which attributes the IdP will be sending about the logged-in user, and the respective access-control rules are configured based on those attributes. Most of the configurations are done via docker configs defined in the docker compose files. + +- Create a key and certificate pair using the attached ``config/shibboleth/keygen.sh`` script in the cloned vs repository and save them as the respective docker secrets. + +.. code-block:: bash + + SPURL="emg.pass.copernicus.eu" # service initial access point made accessible by traefik + ./config/shibboleth/keygen.sh -h $SPURL -y 20 -e https://$SPURL/shibboleth -n sp-signing -f + docker secret create EMG_SHIB_CERT sp-signing-cert.pem + docker secret create EMG_SHIB_KEY sp-signing-key.pem + +- Get the IdP metadata and save it as a docker config. Also note the "entityID" of the IdP, as it needs to be referenced in your ``shibboleth2.xml`` configuration. + +.. code-block:: bash + + docker config create idp-metadata idp-metadata-received.xml + +- Configure the Apache ServerName used inside the ``shibauth`` service by modifying the ``APACHE_SERVERNAME`` environment variable of the corresponding ``shibauth`` service in ``docker-compose.<stack>.ops.yml``. This URL should resolve to the actual service URL. + +- Modify the ``shibboleth2.xml`` content by setting your SP "entityID" in the ``ApplicationDefaults`` element. Additionally, edit the "entityID" value inside the ``SSO`` element to match the IdP "entityID". Note that "entityID" does not need to resolve to an actual service URL. + +.. code-block:: xml + + <ApplicationDefaults entityID="https://emg.pass.copernicus.eu/shibboleth"> + ... + <SSO entityID="https://idp.example.com/idp/shibboleth"> + SAML2 + </SSO> + ... + </ApplicationDefaults> + +- Deploy your shibauth service, exchange your SP metadata with the IdP provider and have them recognize your SP. The necessary metadata needs to be downloaded from the ``/Shibboleth.sso/Metadata`` url of the service. +
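+For example, once the stack is running, the SP metadata for the ``emg`` stack from above could be fetched like this (the output file name is arbitrary):
+
+.. code-block:: bash
+
+   curl "https://emg.pass.copernicus.eu/Shibboleth.sso/Metadata" -o sp-metadata.xml
+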
+- Get information about the attributes provided by the IdP and update ``config/shibboleth/attribute-map.xml`` by adding individual entries mapping the ``name`` provided by the IdP to the ``id`` used by the SP internally. Example configuration (the attribute names and ids shown are illustrative): + +.. code-block:: xml + + <Attributes xmlns="urn:mace:shibboleth:2.0:attribute-map"> + <Attribute name="urn:oid:0.9.2342.19200300.100.1.3" id="mail"/> + <Attribute name="urn:oid:1.3.6.1.4.1.5923.1.1.1.7" id="entitlement"/> + </Attributes> + +- Create custom access rules based on these attributes and map these access controls to different internal apache routes to which the Traefik ForwardAuth middleware will point. Access rules are created in ``config/shibboleth/<stack>-ac.xml``. + +An example of an external access control rules configuration (the attribute ids shown are illustrative): + +.. code-block:: xml + + <AccessControl> + <OR> + <RuleRegex require="user">.+</RuleRegex> + <Rule require="entitlement">Privileged_Access Public_Access</Rule> + </OR> + </AccessControl> + +- Check the configured links between the Apache configuration of the ``shibauth`` service, the access rules, the Traefik ForwardAuth middleware and the per-service Traefik labels. The following simplified examples show these links in more detail: + +The ``APACHE_SERVERNAME`` environment variable needs to be set to the same hostname that Traefik serves as the main entry point. Part of the docker compose definition of the shibauth service in ``docker-compose.emg.ops.yml``: + +.. code-block:: yaml + + services: + shibauth: + environment: + APACHE_SERVERNAME: "https://emg.pass.copernicus.eu:443" + deploy: + labels: + - "traefik.http.routers.shibauth.rule=Host(`emg.pass.copernicus.eu`) && PathPrefix(`/secure`, `/Shibboleth.sso`)" + ... + +The relevant Apache configuration in ``config/shibboleth/shib-apache.conf``, enabling Shibboleth authentication and authorization on the ``/secure`` endpoint: + +.. code-block:: apacheconf + + <Location /Shibboleth.sso> + SetHandler shib + </Location> + + PassEnv APACHE_SERVERNAME + ServerName "${APACHE_SERVERNAME}" + + <Location /secure> + AuthType shibboleth + ShibRequestSetting requireSession 1 + Require shib-plugin /etc/shibboleth/pass-ac.xml + </Location> + ... + +Part of the Traefik ForwardAuth middleware configuration from ``traefik-dynamic.yml``, defining the internal address pointing to the ``/secure`` endpoint of the ``shibauth`` service: + +.. code-block:: yaml + + http: + middlewares: + shibAuth: + forwardAuth: + address: http://shibauth/secure + trustForwardHeader: true + +Part of the renderer service Traefik labels from ``docker-compose.emg.ops.yml``, where access through the middleware is configured: + +.. code-block:: yaml + + services: + renderer: + deploy: + labels: + # router for shib auth based access (https) + - "traefik.http.routers.emg-renderer-shib.rule=Host(`emg.pass.copernicus.eu`) && PathPrefix(`/ows`, `/opensearch`, `/admin`)" + - "traefik.http.routers.emg-renderer-shib.middlewares=shibAuth@file" diff --git a/documentation/operator-guide/configuration.rst b/documentation/operator-guide/configuration.rst index c4cbddd6b4421293d8821b7d74e24d471fcb5435..dce3a837f6dcba86c2a197f794bccaf0c2180365 100644 --- a/documentation/operator-guide/configuration.rst +++ b/documentation/operator-guide/configuration.rst @@ -277,6 +277,8 @@ source/target Here, the source file storage and the target file storage are configured. This can either be a local directory or an OpenStack Swift object storage. + If Swift is used for the source, the download container can be left unset. In that case, + the container can be inferred from the given path in the format <container>/<path>. workdir @@ -397,7 +399,7 @@ preprocessing force_north_up - TODO + Circumvents the naming of corner names and assumes a north-up orientation of the image. tps @@ -494,7 +496,7 @@ preprocessing defaults. Sensitive variables -^^^^^^^^^^^^^^^^^^^ +------------------- Since environment variables include credentials that are considered sensitive, avoiding their exposure inside ``.env`` files would be the right practice. @@ -514,7 +516,7 @@ An example of creating configurations for sftp image using the following command .. 
code-block:: bash - printf ":::" | docker config create sftp-users - + printf ":::" | docker config create sftp-users- - An example of creating ``OS_PASSWORD`` as secret using the following command : @@ -531,5 +533,7 @@ An example of creating ``BASIC_AUTH_USERS_AUTH`` secret: htpasswd -nb user2 YyuN9bYRvBUUU6COx7itWw5qyyARus >> auth_list.txt docker secret create BASIC_AUTH_USERS_AUTH auth_list.txt +For configuration of the ``shibauth`` service, please consult a separate chapter :ref:`access`. + The next section :ref:`management` describes how an operator interacts with a deployed VS stack. diff --git a/documentation/operator-guide/images/kibana_1.png b/documentation/operator-guide/images/kibana_1.png new file mode 100644 index 0000000000000000000000000000000000000000..40efe5567ff541250adc10f91d1a23a1c96223b0 Binary files /dev/null and b/documentation/operator-guide/images/kibana_1.png differ diff --git a/documentation/operator-guide/images/kibana_2.png b/documentation/operator-guide/images/kibana_2.png new file mode 100644 index 0000000000000000000000000000000000000000..bfc60b5b3958778a3770c201d543dfc152b3cdd6 Binary files /dev/null and b/documentation/operator-guide/images/kibana_2.png differ diff --git a/documentation/operator-guide/index.rst b/documentation/operator-guide/index.rst index 249dfe69a680b8d7b323f4a674e9d65de7a1ec11..4aa4e923c76cd5fcb41800e9118066d60bcbf656 100644 --- a/documentation/operator-guide/index.rst +++ b/documentation/operator-guide/index.rst @@ -12,6 +12,7 @@ View Server - Operator Guide configuration management ingestion + access .. Indices and tables diff --git a/documentation/operator-guide/ingestion.rst b/documentation/operator-guide/ingestion.rst index 16be9b8cf531ba9df398debe962f7c313d9b7c27..456d9ca7fbbcdb51fb9168a5e052da1a9e0bf920 100644 --- a/documentation/operator-guide/ingestion.rst +++ b/documentation/operator-guide/ingestion.rst @@ -82,7 +82,7 @@ For a more concrete example the following command executes a .. code-block:: bash - redis-cli lpush preprocess_queue "/data25/OA/PL00/1.0/00/urn:eop:DOVE:MULTISPECTRAL_4m:20180811_081455_1054_3be7/0001/PL00_DOV_MS_L3A_20180811T081455_20180811T081455_TOU_1234_3be7.DIMA.tar" + redis-cli lpush preprocess_queue "data25/OA/PL00/1.0/00/urn:eop:DOVE:MULTISPECTRAL_4m:20180811_081455_1054_3be7/0001/PL00_DOV_MS_L3A_20180811T081455_20180811T081455_TOU_1234_3be7.DIMA.tar" Usually, with a preprocessor service running and no other items in the ``preprocess_queue`` this value will be immediately popped from the list and @@ -92,7 +92,7 @@ of the ``preprocess_queue``: .. code-block:: bash $ redis-cli lrange preprocess_queue 0 -1 - /data25/OA/PL00/1.0/00/urn:eop:DOVE:MULTISPECTRAL_4m:20180811_081455_1054_3be7/0001/PL00_DOV_MS_L3A_20180811T081455_20180811T081455_TOU_1234_3be7.DIMA.tar + data25/OA/PL00/1.0/00/urn:eop:DOVE:MULTISPECTRAL_4m:20180811_081455_1054_3be7/0001/PL00_DOV_MS_L3A_20180811T081455_20180811T081455_TOU_1234_3be7.DIMA.tar Now that the product is being preprocessed, it should be visible in the ``preprocessing_set``. As the name indicates, this is using the ``Set`` @@ -101,7 +101,7 @@ datatype, thus requiring the ``SMEMBERS`` subcommand to list: .. 
code-block:: bash $ redis-cli smembers preprocessing_set 0 -1 - /data25/OA/PL00/1.0/00/urn:eop:DOVE:MULTISPECTRAL_4m:20180811_081455_1054_3be7/0001/PL00_DOV_MS_L3A_20180811T081455_20180811T081455_TOU_1234_3be7.DIMA.tar + data25/OA/PL00/1.0/00/urn:eop:DOVE:MULTISPECTRAL_4m:20180811_081455_1054_3be7/0001/PL00_DOV_MS_L3A_20180811T081455_20180811T081455_TOU_1234_3be7.DIMA.tar Once the preprocessing of the product is finished, the preprocessor will remove the currently worked on path from the ``preprocessing_set`` and add it either @@ -130,6 +130,27 @@ added to the ``registering_set``, afterwards the path is placed to either the sets can be inspected by the ``LRANGE`` or ``SMEMBERS`` subcommands respectively. +Ingestor and sftp +~~~~~~~~~~~~~~~~~ +Triggering preprocessing and registration via pushing to the redis queues is very convenient for single ingestion campaigns, but not optimal for continuous ingestion of new products from "live" sources. +The ``Ingestor`` service, optionally together with the ``sftp`` service, allows data ingestion to be initiated by external means. + +``Ingestor`` can work in two modes: + +- Default: Exposing a simple ``/`` endpoint and listening for ``POST`` requests containing ``data`` with either a Browse Report JSON or a string with the path on the object storage to the product to be ingested. It then parses this information and internally puts it into the configured redis queue (preprocess or register). +- Alternative: Listening for newly added Browse Report files on a configured path on a file system via ``inotify``. +These Browse Report files need to conform to an agreed XML schema to be correctly handled. +The ``sftp`` service enables secure access to a configured folder via sftp, and this folder can be mounted into other VS services. This way, the ``Ingestor`` can listen for files newly created via the sftp access. +If the alternative filedaemon mode is to be used, the ``INOTIFY_WATCH_DIR`` environment variable needs to be set and the ``command`` used in the docker-compose..ops.yml for the ``ingestor`` service needs to be set to ``python3 filedaemon.py``: + +.. code-block:: yaml + + ingestor: + environment: + REDIS_PREPROCESS_MD_QUEUE_KEY: "preprocess_queue" # to override md_queue (json) and instead use (string) + command: + ["python3", "/filedaemon.py"] + Direct Data Management ---------------------- @@ -162,12 +183,23 @@ it is passed as a command line argument, which is then processed normally. .. code-block:: bash - python3 /preprocessor.py \ - --mode standard \ - --replace \ - --tar-object-path /data25/OA/PL00/1.0/00/urn:eop:DOVE:MULTISPECTRAL_4m:20180811_081455_1054_3be7/0001/PL00_DOV_MS_L3A_20180811T081455_20180811T081455_TOU_1234_3be7.DIMA.tar + preprocess \ + --config-file /preprocessor_config.yml \ + --validate \ + --use-dir /tmp \ + data25/OA/PL00/1.0/00/urn:eop:DOVE:MULTISPECTRAL_4m:20180811_081455_1054_3be7/0001/PL00_DOV_MS_L3A_20180811T081455_20180811T081455_TOU_1234_3be7.DIMA.tar + +In order to preprocess an ngEO Ingest Browse Report, an additional ``--browse-report`` parameter needs to be added: -In this mode, the item will not be placed in the resulting set +.. code-block:: bash + + preprocess \ + --config-file /preprocessor_config.yml \ + --browse-report \ + --use-dir /tmp \ + browse_report_test1.json + +In this "one-off" mode, the item will not be placed in the resulting set (``preprocessing_set``, ``preprocess-success_set``, and ``preprocess-failure_set``).
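Referring back to the ``Ingestor`` default mode described above, an external system can trigger ingestion with a plain HTTP ``POST`` request. The following is only an illustrative sketch: the host name and port are assumptions that depend on how the ``ingestor`` service is exposed in the respective docker-compose files, and the exact payload encoding should be verified against the ingestor implementation.

.. code-block:: bash

   # hypothetical host/port; the posted "data" carries either a Browse Report
   # JSON or a plain product path on the object storage
   curl -X POST "http://ingestor:8000/" \
        --data "data25/OA/PL00/1.0/00/urn:eop:DOVE:MULTISPECTRAL_4m:20180811_081455_1054_3be7/0001/PL00_DOV_MS_L3A_20180811T081455_20180811T081455_TOU_1234_3be7.DIMA.tar"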
@@ -273,3 +305,14 @@ Deregistration .. code-block:: bash manage.py coverage deregister "${product_id}_coverage" + +Preprocessing vs registration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The preprocessing step aims to ensure that cloud optimized GeoTIFF (COG) files are created in order to significantly speed up the viewing of large amounts of data at lower zoom levels. There are several cases where such preprocessing is not necessary or not wanted. + +- If the data is already available as COGs and in the projection that will be presented to the user most of the time, direct registration should be used. This means that paths to individual products are pushed directly to the register queue (see the sketch after this list). + +- Direct registration can also be preferred in cases where the preprocessing step would take too much time: it provides immediate access to the metadata and catalog functions, at the cost of slower rendering times.
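For the direct registration case mentioned in the first bullet above, the product path is pushed straight to the registration queue instead of the preprocessing queue. A minimal sketch, assuming the queue key ``register_queue``; the actual key must match the registrar configuration of the deployment:

.. code-block:: bash

   redis-cli lpush register_queue "data25/OA/PL00/1.0/00/urn:eop:DOVE:MULTISPECTRAL_4m:20180811_081455_1054_3be7/0001/PL00_DOV_MS_L3A_20180811T081455_20180811T081455_TOU_1234_3be7.DIMA.tar"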
+ +The next chapter :ref:`access` describes the authorization and authentication concepts used and outlines how external access to the individual components and to the service as a whole is configured. \ No newline at end of file diff --git a/documentation/operator-guide/intro.rst b/documentation/operator-guide/intro.rst index 3d2975e249c888b93d327f7b21b7a8eec9a716e2..6e8184953e672efa72ed24cd507b58f932a80b93 100644 --- a/documentation/operator-guide/intro.rst +++ b/documentation/operator-guide/intro.rst @@ -32,6 +32,9 @@ respective Docker image in parenthesis): - Database (postgis) - Queue Manager (redis) - Log collector (fluentd) +- Kibana (kibana) +- Elasticsearch (elasticsearch) +- Shibboleth SP3 (unicon/shibboleth-sp) These services are bundled and managed together in a Docker Swarm via Docker Compose configuration files. @@ -50,12 +53,17 @@ the used images: - mdillon/postgis:10 - redis -- traefik:2.1 -- fluent/fluentd -- registry.gitlab.eox.at/esa/prism/vs/pvs_core:latest -- registry.gitlab.eox.at/esa/prism/vs/pvs_cache:latest -- registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor:latest -- registry.gitlab.eox.at/esa/prism/vs/pvs_client:latest +- traefik +- elasticsearch +- kibana +- unicon/shibboleth-sp +- atmoz/sftp +- registry.gitlab.eox.at/esa/prism/vs/fluentd +- registry.gitlab.eox.at/esa/prism/vs/pvs_core +- registry.gitlab.eox.at/esa/prism/vs/pvs_cache +- registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor +- registry.gitlab.eox.at/esa/prism/vs/pvs_client +- registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor Configuration Files ------------------- diff --git a/documentation/operator-guide/management.rst b/documentation/operator-guide/management.rst index b7fcc9a070241619b7fe179271b05bb83f184134..181866b9b7ed1e4996c631ceb101d50c48f475a8 100644 --- a/documentation/operator-guide/management.rst +++ b/documentation/operator-guide/management.rst @@ -62,9 +62,8 @@ shutting down of the stack and new deployment. Inspecting reports ------------------ -Once registered, a xml report containes wcs and wms getcapabilities of the registered product is generated and can be accessed by sftp into `SFTP` image, - -In order to log into the logging folders through port 2222 on the hosting ip (e.g. localhost if you are running the dev stack ) The following command can be used: +Once a product is registered, an XML report containing WCS and WMS GetCapabilities of the registered product is generated and can be accessed by connecting to the `SFTP` service via the sftp protocol. +In order to log into the logging folders through port 2222 for ``vhr18`` (``emg`` and ``dem`` use ports 2223 and 2224 respectively) on the hosting IP (e.g. localhost if you are running the dev stack), the following command can be used: .. code-block:: bash @@ -74,8 +73,8 @@ this will direct the user into `/home//data` directory which contains .. Note:: The mounted directory that the user is directed into is *`/home/user`*, where `user` is the username, hence when changing the username in the `.conf` file, the `sftp` mounted volumes path in `docker-compose..yml` must be changed respectively. -Inspecting logs ---------------- +Inspecting logs in development +------------------------------ All service components are running inside docker containers and it is therefore possible to inspect the logs for anomalies via standard docker logs calls redirected for example to less command to allow paging through them. @@ -95,8 +94,36 @@ It is possible to show logs of all containers belonging to a service from a mast docker service logs _ -t 2>&1 | sort -k 1 2>&1 | tail -n 2>&1 | less -The docker service logs is intended as a quick way to view the latest log entries of all tasks of a service, but should not be used as a main way to collect these logs. For that it would be appropriate to use a logging driver, and extract the logs from the nodes to a central log aggregator. -The docker service logs is intended as a quick way to view the latest log entries of all tasks of a service, but should not be used as a main way to collect these logs. For that it would be appropriate to use a logging driver, and extract the logs from the nodes to a central log aggregator. +The docker service logs command is intended as a quick way to view the latest log entries of all tasks of a service, but should not be used as the main way to collect these logs. For that, in a production setup, an additional EFK (Elasticsearch, Fluentd, Kibana) stack is deployed. + +Inspecting logs in production +----------------------------- + +Fluentd is configured as the main logging driver of the Docker daemon on the virtual machine level. Therefore, for the other services to run, the Fluentd service must be running too. To access the logs, the interactive and multi-purpose Kibana interface is available and exposed externally by traefik. + +For a simple listing of the filtered, time-sorted logs as an equivalent to the `docker service logs` command, the basic ``Discover`` app can be used. The main panel to interact with the logs is the ``Search`` bar, allowing filtered field-data and free-text searches, modifying the time range etc. The individual log results then appear in the ``Document table`` panel at the bottom of the page. + + +.. _fig_kibana_discover: +.. figure:: images/kibana_2.* + :alt: Kibana discover + + *Kibana discover panel* + +For specific help with the ``Discover`` panel, please consult the `Kibana official documentation `_ + +In order to select any other option from the Kibana toolkit, click the menu icon (horizontal lines) in the top left and pick a tool. + +.. _fig_kibana_menu: +.. figure:: images/kibana_1.* + :width: 250 + :alt: Kibana menu + + *Kibana menu* + +Kibana also allows aggregating log data based on a search query in two modes of operation: ``Bucketing`` and ``Metrics`` applied to all buckets. + +These aggregations are then used in ``Visualisations`` with various chart modes like vertical bar charts or horizontal line charts. Using saved searches improves the performance of the charts by limiting the results list.
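For instance, to narrow the ``Discover`` results down to error entries of a single service, a query of roughly the following form can be entered into the ``Search`` bar. The field names are assumptions and depend on how the fluentd records are structured in the actual deployment:

.. code-block:: none

   container_name: *registrar* and log: *error*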
Increasing logging level ------------------------ @@ -112,7 +139,9 @@ A restart of respective service for the change to be applied is also necessary. cd ${INSTALL_DIR}/pvs_instance sed -i 's/DEBUG = False/DEBUG = True/g' settings.py -In order to increase logging level of registrar and preprocessor services to `debug`, the respective Python scripts need to be run with an optional parameter **-v 4**. +In order to increase the logging level of the registrar and preprocessor services to `DEBUG`, the respective Python commands need to be run with an optional parameter **--debug**. + +The Ingestor service logs its messages in DEBUG mode by default. The cache services internally uses a Mapcache software, which usually incorporates an Apache 2 HTTP Server. Due to that, logging level is shared throughout the whole service and is based on Apache `.conf` file, which is stored in $APACHE_CONF environment variable. To change the logging level, edit this file, by setting a **LogLevel debug** and then gracefully restart the Apache component (this way, the cache service itself will not restart and renew default configuration). diff --git a/documentation/operator-guide/setup.rst b/documentation/operator-guide/setup.rst index bb91533c06ab1e442c2ed932fd03f589be0d94d2..1ccaea907da87ee5d196d2d0540a8b24a1f3ada9 100644 --- a/documentation/operator-guide/setup.rst +++ b/documentation/operator-guide/setup.rst @@ -113,8 +113,10 @@ Now the relevant images can be pulled: docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_cache docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_preprocessor docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_client + docker pull registry.gitlab.eox.at/esa/prism/vs/fluentd + docker pull registry.gitlab.eox.at/esa/prism/vs/pvs_ingestor + -.. # TODO: ingestor image? Logging @@ -138,9 +140,13 @@ container. Stack Deployment ---------------- -Now that a Docker Swarm is established, it is time to deploy the VS as a stack. -This is done using the created Docker Compose configuration files. In order to -enhance the re-usability, these files are split into multiple parts to be used +Before the stack deployment step, some environment variables and configurations (which +are considered sensitive) should be created beforehand. This can be done following +the ``Sensitive variables`` steps that are included in the next :ref:`configuration` section. + +Now that a Docker Swarm is established and docker secrets and configs are created, +it is time to deploy the VS as a stack. This is done using the created Docker Compose +configuration files. In order to enhance the re-usability, these files are split into multiple parts to be used for both development and final service deployment. For a development deployment one would do (replace ``name`` with the actual diff --git a/documentation/user-guide/Makefile b/documentation/user-guide/Makefile index e994f10c0d30135b07fa08066c43f7e4e454b949..0f20b85a8014130c9902c0afe88de285c8b83cab 100644 --- a/documentation/user-guide/Makefile +++ b/documentation/user-guide/Makefile @@ -8,7 +8,7 @@ SPHINXBUILD ?= sphinx-build SPHINXAUTOBUILD ?= sphinx-autobuild SOURCEDIR = . BUILDDIR = _build -USERGUIDE_VERSION = v1.1.0 +USERGUIDE_VERSION = v1.1.1 # Put it first so that "make" without argument is like "make help". 
help: diff --git a/fluentd/Dockerfile b/fluentd/Dockerfile index fb2dfae9469b57e2afd689b7b9b28b7cdee3432c..0bc13cb9a9e897b37f524c25cf252de1618dfb0a 100644 --- a/fluentd/Dockerfile +++ b/fluentd/Dockerfile @@ -32,7 +32,7 @@ LABEL name="prism view server cache" \ vendor="EOX IT Services GmbH " \ license="MIT Copyright (C) 2019 EOX IT Services GmbH " \ type="prism view server fluentd" \ - version="0.0.1-dev" + version="1.0.0-rc.2" USER root RUN gem install fluent-plugin-elasticsearch \ diff --git a/ingestor/Dockerfile b/ingestor/Dockerfile index 61afce217f6e28ee66023eadb0cd0047336f68b4..e1b80dcf0380cee38ec5daedebeaa3030b6499a5 100644 --- a/ingestor/Dockerfile +++ b/ingestor/Dockerfile @@ -32,7 +32,7 @@ LABEL name="prism view server cache" \ vendor="EOX IT Services GmbH " \ license="MIT Copyright (C) 2020 EOX IT Services GmbH " \ type="prism view server ingestor" \ - version="0.0.1-dev" + version="1.0.0-rc.2" USER root ADD install.sh requirements.txt \ diff --git a/preprocessor/Dockerfile b/preprocessor/Dockerfile index b7da584f7aa2e019516b9c64c87342ce2f155486..987d52b45bdba74919d3ded16be968d27bdff789 100644 --- a/preprocessor/Dockerfile +++ b/preprocessor/Dockerfile @@ -32,7 +32,7 @@ LABEL name="prism view server preprocessor" \ vendor="EOX IT Services GmbH " \ license="MIT Copyright (C) 2019 EOX IT Services GmbH " \ type="prism view server preprocessor" \ - version="0.0.1-dev" + version="1.0.0-rc.2" ENV LC_ALL=C.UTF-8 ENV LANG=C.UTF-8 diff --git a/preprocessor/setup.py b/preprocessor/setup.py index 56f894a93464471f2c79817b005ab906dcf5bf73..34c7a2e8ab38a7a4de69816040821b4ed3cde398 100644 --- a/preprocessor/setup.py +++ b/preprocessor/setup.py @@ -6,7 +6,7 @@ long_description = "" setup( name="preprocessor", # Replace with your own username - version="0.0.1", + version="1.0.0-rc.2", author="", author_email="", description="preprocessor for PVS", diff --git a/testing/Dockerfile b/testing/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..c210be98b71cc94acbfe839b59e6e5518088b0ca --- /dev/null +++ b/testing/Dockerfile @@ -0,0 +1,35 @@ +#------------------------------------------------------------------------------ +# +# Project: prism view server +# Authors: Stephan Meissl +# +#------------------------------------------------------------------------------ +# Copyright (C) 2020 EOX IT Services GmbH +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies of this Software or works derived from this Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. 
+#----------------------------------------------------------------------------- + +# this image is built manually and pushed to registry.gitlab.eox.at/esa/prism/vs/docker-base-testing + +FROM docker:19.03.13-dind + +RUN apk update && apk add bash postgresql-dev gcc g++ python3 python3-dev musl-dev py-pip libffi-dev openssl-dev make gdal==3.1.4-r0 gdal-dev==3.1.4-r0 + +ADD requirements.txt / +RUN pip3 install -r requirements.txt diff --git a/testing/gitlab_test.sh b/testing/gitlab_test.sh index 8ce6b51f86676d54f807a2171f68c39b231b2d3c..4503809d4a1a9c87a4b7dafadabdeb2555d5c530 100755 --- a/testing/gitlab_test.sh +++ b/testing/gitlab_test.sh @@ -9,7 +9,6 @@ cat $emg_db > ../env/emg_db.env cat $emg_django > ../env/emg_django.env cat $emg_obs > ../env/emg_obs.env - # use `pvs_testing` bucket instead sed -i -e 's/emg-data/pvs_testing/g' ../env/emg.env @@ -23,7 +22,6 @@ set -o allexport source ../env/emg.env set +o allexport - mkdir data docker swarm init docker network create -d overlay emg-extnet @@ -33,16 +31,11 @@ printf $OS_PASSWORD_DOWNLOAD | docker secret create OS_PASSWORD_DOWNLOAD - printf $DJANGO_PASSWORD | docker secret create DJANGO_PASSWORD - printf $OS_PASSWORD | docker secret create OS_PASSWORD - -# create docker configs +# create docker configs printf $sftp_users_emg | docker config create sftp_users_emg - - docker stack deploy -c ../docker-compose.emg.yml -c ../docker-compose.emg.dev.yml emg-pvs -# installing the requirments -apk update && apk add bash postgresql-dev gcc g++ python3 python3-dev musl-dev py-pip libffi-dev openssl-dev make gdal==3.1.4-r0 gdal-dev==3.1.4-r0 -pip3 install -r requirements.txt - -./docker-stack-wait.sh -n renderer -n registrar -n preprocessor -n ingestor -n sftp emg-pvs +./docker-stack-wait.sh -n renderer -n registrar -n preprocessor -n database -n sftp emg-pvs docker service ls # perform the testing @@ -53,10 +46,9 @@ if [ $? 
-ne 0 ] then echo "Failure in tests, logging from services:" for service in $(docker service ls --format "{{.Name}}"); do - echo "________________________________________" - docker service ps $service --no-trunc - docker service logs $service - done + docker service ps $service --no-trunc >> "/tmp/$service.log" + docker service logs $service >> "/tmp/$service.log" + done exit 1 fi diff --git a/testing/preprocessor_test.sh b/testing/preprocessor_test.sh index 60334fb9ca8ff7890ee9659d02072d816ef0c8f9..c86f3e8493e921a77310419998aa012e46fffce5 100755 --- a/testing/preprocessor_test.sh +++ b/testing/preprocessor_test.sh @@ -1,5 +1,6 @@ #!/bin/bash product_list_file=$1 +echo "Starting preprocessor test" while read product; do echo $product diff --git a/testing/registrar_test.py b/testing/registrar_test.py index b53da07c15de0f0aeb866f4ccf9e40d55acb2bc3..5a8397f32f8d16e728b83472ad7536dfacaaad6f 100644 --- a/testing/registrar_test.py +++ b/testing/registrar_test.py @@ -34,7 +34,7 @@ def identifiers(): def sftp_connection(): username = os.environ['sftp_users_emg'].split(':')[0] password = os.environ['sftp_users_emg'].split(':')[1] - transport = paramiko.Transport(('docker', 2222)) + transport = paramiko.Transport(('docker', 2223)) transport.connect(username=username, password=password) with paramiko.SFTPClient.from_transport(transport) as sftp: yield sftp diff --git a/testing/registrar_test.sh b/testing/registrar_test.sh index 1d3e0aae949070ee26ac6c741be58b5eb0a6fe08..ef398367d57462a9a70d89422452c17b82ed6d17 100755 --- a/testing/registrar_test.sh +++ b/testing/registrar_test.sh @@ -1,18 +1,15 @@ #!/bin/bash product_list_file=$1 -echo "inside registrar_test" - -OS_PASSWORD=$(docker exec -i $(docker ps -qf "name=emg-pvs_registrar") cat /run/secrets/OS_PASSWORD) +echo "Starting registrar test" IFS="," - +docker exec -i $(docker ps -qf "name=emg-pvs_registrar") /wait-initialized.sh +docker exec -i $(docker ps -qf "name=emg-pvs_renderer") /wait-initialized.sh while read product; do docker exec -e OS_PASSWORD=$OS_PASSWORD -i $(docker ps -qf "name=emg-pvs_registrar") \ - python3 /registrar.py \ - --objects-prefix $product \ - --service-url $SERVICE_URL \ - --reporting-dir "/mnt/reports" \ - <<<$product + registrar register \ + --config-file /config.yaml \ + "$product" done < "$product_list_file" diff --git a/traefik-dynamic.yml b/traefik-dynamic.yml index c839637c84bfe949813a78412634147b5a201955..932dc5fc0872f5469ba274790c91dab0733bcdb1 100644 --- a/traefik-dynamic.yml +++ b/traefik-dynamic.yml @@ -18,6 +18,14 @@ http: basicAuth: realm: "PRISM View Server (PVS)" usersFile: "/run/secrets/BASIC_AUTH_USERS_AUTH" + shibAuth: + forwardAuth: + address: http://shibauth/secure + trustForwardHeader: true + shibAuthCache: + forwardAuth: + address: http://shibauth/secure-cache + trustForwardHeader: true compress: compress: {} redirect: diff --git a/traefik.yml b/traefik.yml index 2986bbf23539296df30957b5adb8376143d77adf..39a93c19019a09cfdd7e5d5b0e7413b65494ec5b 100644 --- a/traefik.yml +++ b/traefik.yml @@ -19,7 +19,7 @@ providers: api: dashboard: true log: - level: WARN + level: INFO accessLog: {} certificatesResolvers: default: