diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 25357e679999f4de870690aa1c2428faa5492691..bf885f028582bd9b02aac311165d5f7077aaa2b3 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -33,8 +33,7 @@ build-master:
     - IMAGE_5="$CI_REGISTRY_IMAGE/fluentd"
     - docker pull "$IMAGE_5":latest || true
     - docker build --cache-from "$IMAGE_5":latest -t "$IMAGE_5":latest -t "$IMAGE_5":$VERSION_5 fluentd/
-    - chmod +x gitlab_test.sh
-    - ./gitlab_test.sh
+    - cd ./testing && ./gitlab_test.sh && cd -
     - docker push "$IMAGE_1":$VERSION_1
     - docker push "$IMAGE_1":latest
     - docker push "$IMAGE_2":$VERSION_2
@@ -76,8 +75,7 @@ build:
     - docker pull "$IMAGE":latest || true
     - docker build --cache-from "$IMAGE":latest -t "$IMAGE":$CI_COMMIT_REF_SLUG -t "$IMAGE":$CI_COMMIT_REF_NAME fluentd/
     - docker tag "$IMAGE:$CI_COMMIT_REF_NAME" "$IMAGE:latest"
-    - chmod +x gitlab_test.sh
-    - ./gitlab_test.sh
+    - cd ./testing && ./gitlab_test.sh && cd -
   except:
     - master
- 
+
diff --git a/config/emg_init-db.sh b/config/emg_init-db.sh
index 73f9c1129b1767c66ccd09c6e920e143190b9328..5944a22d495d77b97a2b2e7dfa9cee068742c984 100644
--- a/config/emg_init-db.sh
+++ b/config/emg_init-db.sh
@@ -92,7 +92,7 @@ if python3 manage.py id check "${COLLECTION}"; then
         --blue-range 0 255 \
         --red-nodata 0 \
         --green-nodata 0 \
-        --blue-nodata 0 
+        --blue-nodata 0
     # EQ02_3
     python3 manage.py producttype create "${COLLECTION}"_Product_EQ02_3 --traceback \
         --coverage-type "RGB"
@@ -125,7 +125,7 @@ if python3 manage.py id check "${COLLECTION}"; then
         --blue-range 0 500 \
         --red-nodata 0 \
         --green-nodata 0 \
-        --blue-nodata 0 
+        --blue-nodata 0
     # EQ02_4
     python3 manage.py producttype create "${COLLECTION}"_Product_EQ02_4 --traceback \
         --coverage-type "RGBNir"
@@ -204,7 +204,7 @@ if python3 manage.py id check "${COLLECTION}"; then
         --blue-range 0 800 \
         --red-nodata 0 \
         --green-nodata 0 \
-        --blue-nodata 0 
+        --blue-nodata 0
     # EW02_4
     python3 manage.py producttype create "${COLLECTION}"_Product_EW02_4 --traceback \
         --coverage-type "RGBNir"
@@ -307,7 +307,7 @@ if python3 manage.py id check "${COLLECTION}"; then
         --blue-range 0 800 \
         --red-nodata 0 \
         --green-nodata 0 \
-        --blue-nodata 0 
+        --blue-nodata 0
     # EW03_4
     python3 manage.py producttype create "${COLLECTION}"_Product_EW03_4 --traceback \
         --coverage-type "RGBNir"
@@ -851,19 +851,6 @@ if python3 manage.py id check "${COLLECTION}"; then
     python3 manage.py browsetype create "${COLLECTION}"_Product_SP07 "NDVI" --traceback \
         --grey "(nir-red)/(nir+red)" --grey-range -1 1
     # PH1A
-    python3 manage.py producttype create "${COLLECTION}"_Product_PH1A --traceback \
-        --coverage-type "RGBNir"
-    python3 manage.py browsetype create "${COLLECTION}"_Product_PH1A --traceback \
-        --red "red" \
-        --green "green" \
-        --blue "blue" \
-        --red-range 1 1000 \
-        --green-range 1 1000 \
-        --blue-range 1 1000 \
-        --red-nodata 0 \
-        --green-nodata 0 \
-        --blue-nodata 0
-    # PH1A
     python3 manage.py producttype create "${COLLECTION}"_Product_PH1A --traceback \
         --coverage-type "RGBNir"
     python3 manage.py browsetype create "${COLLECTION}"_Product_PH1A --traceback \
diff --git a/core/configure.sh b/core/configure.sh
index 943475d4bc273ce64e6f5814babfe8679f9d1d80..3966e886e6f9e09c4c1a41c3fe2b1542466db9e5 100644
--- a/core/configure.sh
+++ b/core/configure.sh
@@ -51,4 +51,4 @@
 chmod g+w -R .
 chgrp users -R .
 
-} 1> &2
\ No newline at end of file
+} 1>&2
diff --git a/core/entrypoint.sh b/core/entrypoint.sh
index 8d82bbf7d54a26f1e90d89ec61f4beec452e216b..a8e57f88358df480cba3f7e99640f455eb84f427 100644
--- a/core/entrypoint.sh
+++ b/core/entrypoint.sh
@@ -5,7 +5,7 @@
 TIMEOUT=${WAIT_TIMEOUT:='15'}
 
 if [[ ! -z $SERVICES ]] ; then
     for service in $SERVICES ; do
-        wait-for-it -t $TIMEOUT $service > &2
+        wait-for-it -t $TIMEOUT $service >&2
     done
 fi
diff --git a/core/initialized.sh b/core/initialized.sh
index f0fdcf300b33b7eaf55ee431b0e9120af632b2e7..07fc196a6d0925e93e78227c33bfef0b53635832 100644
--- a/core/initialized.sh
+++ b/core/initialized.sh
@@ -1,4 +1,4 @@
 #!/bin/bash -e
 
 touch "${INSTANCE_DIR}/.initialized"
-echo "Instance ${INSTANCE_ID} is initialized" > &2
+echo "Instance ${INSTANCE_ID} is initialized" >&2
diff --git a/core/run-httpd.sh b/core/run-httpd.sh
index 30f876a5eb22d6b07381e9be28258a79b3b57b65..44498e3c1f6338dd0ecb4a49941038e544f44a49 100644
--- a/core/run-httpd.sh
+++ b/core/run-httpd.sh
@@ -2,4 +2,4 @@
 
 echo "Running gunicorn"
 
-exec gunicorn --chdir ${INSTALL_DIR}/pvs_instance/ --bind :80 pvs_instance.wsgi:application --workers 8 --max-requests 10 --max-requests-jitter 3 --worker-class sync --timeout 120 --access-logfile - --error-logfile - --log-level warning --disable-redirect-access-to-syslog 2> &1
+exec gunicorn --chdir ${INSTALL_DIR}/pvs_instance/ --bind :80 pvs_instance.wsgi:application --workers 8 --max-requests 10 --max-requests-jitter 3 --worker-class sync --timeout 120 --access-logfile - --error-logfile - --log-level warning --disable-redirect-access-to-syslog 2>&1
diff --git a/core/run-registrar.sh b/core/run-registrar.sh
index 088f4bfa4b6cefb06e868ae6159021361d1de1bc..78b86b6b8e1fdfc0c184136a16200a90db5def27 100644
--- a/core/run-registrar.sh
+++ b/core/run-registrar.sh
@@ -1,5 +1,5 @@
 #!/bin/sh
 
-echo "Running registrar" > &2
+echo "Running registrar" >&2
 
-python3 /registrar.py --mode redis --redis-host ${REDIS_HOST} --redis-port ${REDIS_PORT} --redis-register-queue-key ${REDIS_REGISTER_QUEUE_KEY} --redis-registered-set-key ${REDIS_REGISTERED_SET_KEY} > &2
+python3 /registrar.py --mode redis --redis-host ${REDIS_HOST} --redis-port ${REDIS_PORT} --redis-register-queue-key ${REDIS_REGISTER_QUEUE_KEY} --redis-registered-set-key ${REDIS_REGISTERED_SET_KEY} >&2
diff --git a/core/wait-initialized.sh b/core/wait-initialized.sh
index da9746eeb2f90aec198cb15c97580baa2117421a..95afa5bde5cc5642492125f53e914d0d8ddfa9a1 100644
--- a/core/wait-initialized.sh
+++ b/core/wait-initialized.sh
@@ -1,7 +1,7 @@
 #!/bin/bash -e
 
 until [ -f "${INSTANCE_DIR}/.initialized" ] ; do
-    echo "Waiting until instance ${INSTANCE_ID} is initialized" > &2
+    echo "Waiting until instance ${INSTANCE_ID} is initialized" >&2
     sleep 3
     # TODO: timeout?
 done
diff --git a/docker-compose.dem.dev.yml b/docker-compose.dem.dev.yml
index 3e213c68cebc843d445f5ec9c4110bd6cb44b28b..8ac49a6c078ec3cdc21e82686f8aee3543ea070f 100644
--- a/docker-compose.dem.dev.yml
+++ b/docker-compose.dem.dev.yml
@@ -9,8 +9,6 @@ services:
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
   client:
     ports:
       - "80:80"
@@ -21,8 +19,6 @@
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
   renderer:
     ports:
       - "81:80"
@@ -31,8 +27,6 @@
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
   registrar:
     volumes:
       - type: bind
@@ -53,8 +47,6 @@
     configs:
       - source: mapcache-dev
        target: /mapcache-template.xml
-    logging:
-      driver: "fluentd"
   preprocessor:
     volumes:
       - type: tmpfs
@@ -65,8 +57,6 @@
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
 networks:
   extnet:
     name: dem-extnet
diff --git a/docker-compose.emg.dev.yml b/docker-compose.emg.dev.yml
index 8e8c7d65bfbddd9875a8e4e9441ab53510c040d9..af436d2e83c8e6a65fe69ad5473896b4ec523bd1 100644
--- a/docker-compose.emg.dev.yml
+++ b/docker-compose.emg.dev.yml
@@ -9,8 +9,6 @@ services:
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
   client:
     ports:
       - "80:80"
@@ -21,8 +19,6 @@
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
   renderer:
     ports:
       - "81:80"
@@ -31,8 +27,6 @@
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
   registrar:
     volumes:
       - type: bind
@@ -41,8 +35,6 @@
       - type: bind
         source: ./core/
         target: /core/
-    logging:
-      driver: "fluentd"
   cache:
     ports:
       - "83:80"
@@ -53,8 +45,6 @@
     configs:
      - source: mapcache-dev
        target: /mapcache-template.xml
-    logging:
-      driver: "fluentd"
   preprocessor:
     volumes:
       - type: tmpfs
@@ -65,8 +55,6 @@
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
 networks:
   extnet:
     name: emg-extnet
diff --git a/docker-compose.logging.dev.yml b/docker-compose.logging.dev.yml
index 6ce3f23db24f96836bfef5b3f17bc2af06d83931..d749cb97edd584b85c8bbe46b708d06e2653ee1f 100644
--- a/docker-compose.logging.dev.yml
+++ b/docker-compose.logging.dev.yml
@@ -11,6 +11,24 @@ services:
       resources:
         limits:
           memory: 500M
+  database:
+    logging:
+      driver: "fluentd"
+  client:
+    logging:
+      driver: "fluentd"
+  renderer:
+    logging:
+      driver: "fluentd"
+  registrar:
+    logging:
+      driver: "fluentd"
+  cache:
+    logging:
+      driver: "fluentd"
+  preprocessor:
+    logging:
+      driver: "fluentd"
   kibana:
     ports:
       - "5601:5601"
diff --git a/docker-compose.vhr18.dev.yml b/docker-compose.vhr18.dev.yml
index 1a576b4abd0748a1d93811616801719a3568df97..e7c46c3f2348a66e5fa40df62c5c86dc5557d3de 100644
--- a/docker-compose.vhr18.dev.yml
+++ b/docker-compose.vhr18.dev.yml
@@ -9,8 +9,6 @@ services:
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
   client:
     ports:
       - "80:80"
@@ -21,8 +19,6 @@
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
   renderer:
     ports:
       - "81:80"
@@ -31,8 +27,6 @@
       - type: bind
         source: ./data/
         target: /data/
-    logging:
-      driver: "fluentd"
   registrar:
     volumes:
       - type: bind
@@ -41,8 +35,6 @@
       - type: bind
         source: ./core/
         target: /core/
-    logging:
-      driver: "fluentd"
   cache:
     ports:
       - "83:80"
@@ -53,8 +45,6 @@
     configs:
       - source: mapcache-dev
        target: /mapcache-template.xml
-    logging:
-      driver: "fluentd"
   preprocessor:
     volumes:
       - type: tmpfs
@@ -62,8 +52,6 @@
       - type: bind
         source: ./preprocessor/
         target: /preprocessor/
-    logging:
-      driver: "fluentd"
 networks:
   extnet:
     name: vhr18-extnet
diff --git a/documentation/operator-guide/ingestion.rst b/documentation/operator-guide/ingestion.rst
index dc1b31aaab837ab4514ec8949c3047d389a7cb34..16be9b8cf531ba9df398debe962f7c313d9b7c27 100644
--- a/documentation/operator-guide/ingestion.rst
+++ b/documentation/operator-guide/ingestion.rst
@@ -146,9 +146,9 @@ registrar can be accomplished.
 Preprocessing
 ~~~~~~~~~~~~~
 
-In this section all command examples are assumed to be run from within a running
-preprocessor container. To open a shell on a preprocessor, the following
-command can be used.
+In this section all command examples are assumed to be run from within a
+running preprocessor container. To open a shell on a preprocessor, the
+following command can be used.
 
 .. code-block:: bash
 
diff --git a/env_setup.sh b/env_setup.sh
deleted file mode 100644
index 9f2bb95bc7ffe5d2cb9e26b7676bf2c464ba304d..0000000000000000000000000000000000000000
--- a/env_setup.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-cat $vhr18_db > ./env/vhr18_db.env
-cat $vhr18_django > ./env/vhr18_django.env
-cat $vhr18_obs > ./env/vhr18_obs.env
-
-cat $emg_db > ./env/emg_db.env
-cat $emg_django > ./env/emg_django.env
-cat $emg_obs > ./env/emg_obs.env
-
-
-set -o allexport
-
-source ./env/emg_db.env
-source ./env/vhr18_db.env
-
-set +o allexport
-
-
-sed -i -e 's/emg-data/pvs_testing/g' ./env/emg.env
-sed -i -e 's/vhr18-data/pvs_testing/g' ./env/vhr18.env
-
-sed -i -e 's/emg-cache/pvs_testing/g' ./env/emg_obs.env
-sed -i -e 's/vhr18-cache/pvs_testing/g' ./env/vhr18_obs.env
diff --git a/registrar_test.py b/registrar_test.py
deleted file mode 100644
index 7d2df6332b55589a3d6ad7f80b91937599ff35dd..0000000000000000000000000000000000000000
--- a/registrar_test.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import psycopg2
-import os
-import csv
-
-
-with open('./env/emg_db.env', 'r') as f:
-    env = dict(
-        line.split('=', 1)
-        for line in f
-    )
-database= env['DB_NAME'].replace('\n','')
-port = env['DB_PORT'].replace('\n','')
-host = env['DB_HOST'].replace('\n','')
-database_password= env['DB_PW'].replace('\n','')
-database_user = env['DB_USER'].replace('\n','')
-
-
-def connect_to_db(eo_id):
-    global db_name, coverage_id
-    connection= None
-    try:
-        connection = psycopg2.connect(dbname=database, user=database_user, password=database_password, host='docker', port=port)
-        cursor = connection.cursor()
-        db_name = connection.get_dsn_parameters()["dbname"]
-        postgreSQL_select_Query = "SELECT identifier FROM coverages_eoobject WHERE identifier = '%s';" % eo_id
-        cursor.execute(postgreSQL_select_Query)
-        coverage_id = cursor.fetchone()[0]
-
-    except (Exception, psycopg2.Error) as error :
-        print ("Error while connecting to PostgreSQL", error)
-    finally:
-        #closing database connection.
-        if connection:
-            cursor.close()
-            connection.close()
-            print("PostgreSQL connection is closed")
-
-
-
-def test_db_name(name):
-    with open(name, newline='') as csvfile:
-        spamreader = csv.reader(csvfile)
-        for row in spamreader:
-            identifier = row[0].split('/')[4]
-            connect_to_db(identifier)
-            assert coverage_id == identifier
-            assert db_name == database
-
diff --git a/testing/docker-stack-wait.sh b/testing/docker-stack-wait.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c7f00a3199b8562cd7759e4f481709a306a9768a
--- /dev/null
+++ b/testing/docker-stack-wait.sh
@@ -0,0 +1,148 @@
+#!/bin/sh
+
+# By: Brandon Mitchell <public@bmitch.net>
+# License: MIT
+# Source repo: https://github.com/sudo-bmitch/docker-stack-wait
+
+set -e
+trap "{ exit 1; }" TERM INT
+opt_h=0
+opt_r=0
+opt_s=5
+opt_t=3600
+start_epoc=$(date +%s)
+
+usage() {
+  echo "$(basename $0) [opts] stack_name"
+  echo "  -f filter: only wait for services matching filter, may be passed multiple"
+  echo "             times, see docker stack services for the filter syntax"
+  echo "  -h: this help message"
+  echo "  -n name: only wait for specific service names, overrides any filters,"
+  echo "           may be passed multiple times, do not include the stack name prefix"
+  echo "  -r: treat a rollback as successful"
+  echo "  -s sec: frequency to poll service state (default $opt_s sec)"
+  echo "  -t sec: timeout to stop waiting"
+  [ "$opt_h" = "1" ] && exit 0 || exit 1
+}
+check_timeout() {
+  # timeout when a timeout is defined and we will exceed the timeout after the
+  # next sleep completes
+  if [ "$opt_t" -gt 0 ]; then
+    cur_epoc=$(date +%s)
+    cutoff_epoc=$(expr ${start_epoc} + $opt_t - $opt_s)
+    if [ "$cur_epoc" -gt "$cutoff_epoc" ]; then
+      echo "Error: Timeout exceeded"
+      exit 1
+    fi
+  fi
+}
+get_service_ids() {
+  if [ -n "$opt_n" ]; then
+    service_list=""
+    for name in $opt_n; do
+      service_list="${service_list:+${service_list} }${stack_name}_${name}"
+    done
+    docker service inspect --format '{{.ID}}' ${service_list}
+  else
+    docker stack services ${opt_f} -q "${stack_name}"
+  fi
+}
+service_state() {
+  # output the state when it changes from the last state for the service
+  service=$1
+  # strip any invalid chars from service name for caching state
+  service_safe=$(echo "$service" | sed 's/[^A-Za-z0-9_]/_/g')
+  state=$2
+  if eval [ \"\$cache_${service_safe}\" != \"\$state\" ]; then
+    echo "Service $service state: $state"
+    eval cache_${service_safe}=\"\$state\"
+  fi
+}
+
+while getopts 'f:hn:rs:t:' opt; do
+  case $opt in
+    f) opt_f="${opt_f:+${opt_f} }-f $OPTARG";;
+    h) opt_h=1;;
+    n) opt_n="${opt_n:+${opt_n} } $OPTARG";;
+    r) opt_r=1;;
+    s) opt_s="$OPTARG";;
+    t) opt_t="$OPTARG";;
+  esac
+done
+shift $(expr $OPTIND - 1)
+
+if [ $# -ne 1 -o "$opt_h" = "1" -o "$opt_s" -le "0" ]; then
+  usage
+fi
+
+stack_name=$1
+
+# 0 = running, 1 = success, 2 = error
+stack_done=0
+while [ "$stack_done" != "1" ]; do
+  stack_done=1
+  # run get_service_ids outside of the for loop to catch errors
+  service_ids=$(get_service_ids)
+  for service_id in ${service_ids}; do
+    service_done=1
+    service=$(docker service inspect --format '{{.Spec.Name}}' "$service_id")
+
+    # hardcode a "new" state when UpdateStatus is not defined
+    state=$(docker service inspect -f '{{if .UpdateStatus}}{{.UpdateStatus.State}}{{else}}new{{end}}' "$service_id")
+
+    # check for failed update states
+    case "$state" in
+      paused|rollback_paused)
+        service_done=2
+        ;;
+      rollback_*)
+        if [ "$opt_r" = "0" ]; then
+          service_done=2
+        fi
+        ;;
+    esac
+
+    # identify/report current state
+    if [ "$service_done" != "2" ]; then
"$service_done" != "2" ]; then + replicas=$(docker service ls --format '{{.Replicas}}' --filter "id=$service_id" | cut -d' ' -f1) + current=$(echo "$replicas" | cut -d/ -f1) + target=$(echo "$replicas" | cut -d/ -f2) + if [ "$current" != "$target" ]; then + # actively replicating service + service_done=0 + state="replicating $replicas" + fi + fi + service_state "$service" "$state" + + # check for states that indicate an update is done + if [ "$service_done" = "1" ]; then + case "$state" in + new|completed|rollback_completed) + service_done=1 + ;; + *) + # any other state is unknown, not necessarily finished + service_done=0 + ;; + esac + fi + + # update stack done state + if [ "$service_done" = "2" ]; then + # error condition + stack_done=2 + elif [ "$service_done" = "0" -a "$stack_done" = "1" ]; then + # only go to an updating state if not in an error state + stack_done=0 + fi + done + if [ "$stack_done" = "2" ]; then + echo "Error: This deployment will not complete" + exit 1 + fi + if [ "$stack_done" != "1" ]; then + check_timeout + sleep "${opt_s}" + fi +done diff --git a/gitlab_test.sh b/testing/gitlab_test.sh old mode 100644 new mode 100755 similarity index 54% rename from gitlab_test.sh rename to testing/gitlab_test.sh index 1b0972c92f180d9cf6090c5d457169b5408899ab..348a28c1fe0c8b398e40c27016d2828672db8ec2 --- a/gitlab_test.sh +++ b/testing/gitlab_test.sh @@ -1,15 +1,36 @@ #!/bin/sh -chmod +x env_setup.sh wait_for_container.sh -./env_setup.sh + +# fetch secrets and write them to their according files +cat $vhr18_db > ../env/vhr18_db.env +cat $vhr18_django > ../env/vhr18_django.env +cat $vhr18_obs > ../env/vhr18_obs.env + +cat $emg_db > ../env/emg_db.env +cat $emg_django > ../env/emg_django.env +cat $emg_obs > ../env/emg_obs.env + + +# use `pvs_testing` bucket instead + +sed -i -e 's/emg-data/pvs_testing/g' ../env/emg.env +sed -i -e 's/vhr18-data/pvs_testing/g' ../env/vhr18.env + +sed -i -e 's/emg-cache/pvs_testing/g' ../env/emg_obs.env +sed -i -e 's/vhr18-cache/pvs_testing/g' ../env/vhr18_obs.env + mkdir data docker swarm init docker network create -d overlay emg-extnet -docker stack deploy -c docker-compose.emg.yml -c docker-compose.emg.dev.yml -c docker-compose.logging.yml emg-pvs +docker stack deploy -c ../docker-compose.emg.yml -c ../docker-compose.emg.dev.yml emg-pvs apk update && apk add bash postgresql-dev gcc python3-dev musl-dev py-pip gdal pip3 install -r requirements.txt -./wait_for_container.sh + +./docker-stack-wait.sh -n renderer -n registrar -n preprocessor emg-pvs + +docker service ls + bash ./registrar_test.sh product_list.csv # docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /var/www/pvs/dev/pvs_instance/manage.py storage create pvs_testing pvs_testing --type swift --storage-auth auth-cloud-ovh # docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /core/registrar.py --objects-prefix "OA/PH1B/0.1/b9/urn:eop:PHR:MULTISPECTRAL_0.5m:DS_PHR1B_201608070959189_FR1_PX_E012N32_0719_00974_4148/0000/PH1B_PHR_FUS_1A_20160807T095918_20160807T095920_TOU_1234_4148.DIMA.tar" -# pytest -s registrar_test.py --name OA/PH1B/0.1/b9/urn:eop:PHR:MULTISPECTRAL_0.5m:DS_PHR1B_201608070959189_FR1_PX_E012N32_0719_00974_4148/0000/PH1B_PHR_FUS_1A_20160807T095918_20160807T095920_TOU_1234_4148.DIMA.tar \ No newline at end of file +# pytest -s registrar_test.py --name OA/PH1B/0.1/b9/urn:eop:PHR:MULTISPECTRAL_0.5m:DS_PHR1B_201608070959189_FR1_PX_E012N32_0719_00974_4148/0000/PH1B_PHR_FUS_1A_20160807T095918_20160807T095920_TOU_1234_4148.DIMA.tar diff --git 
diff --git a/product_list.csv b/testing/product_list.csv
similarity index 100%
rename from product_list.csv
rename to testing/product_list.csv
diff --git a/testing/registrar_test.py b/testing/registrar_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..48436d4ea82b26adee54de22afd6580ca2a51bb5
--- /dev/null
+++ b/testing/registrar_test.py
@@ -0,0 +1,41 @@
+import os
+import csv
+
+import pytest
+import psycopg2
+from dotenv import load_dotenv
+
+
+@pytest.fixture(scope="session")
+def connection():
+    load_dotenv(dotenv_path='../env/emg_db.env')
+
+    connect_args = dict(
+        dbname=os.environ['DB_NAME'],
+        user=os.environ['DB_USER'],
+        password=f"\"{os.environ['DB_PW']}\"",
+        host='docker',
+        port=os.environ['DB_PORT'],
+    )
+
+    with psycopg2.connect(**connect_args) as connection:
+        yield connection
+
+
+@pytest.fixture
+def identifiers():
+    with open('./product_list.csv') as f:
+        yield csv.reader(f)
+
+
+def query_eo_object(connection, eo_id):
+    query = f"SELECT identifier FROM coverages_eoobject WHERE identifier = '{eo_id}';"
+    with connection.cursor() as cursor:
+        cursor.execute(query)
+        return cursor.fetchone()[0]
+
+
+def test_db_name(connection, identifiers):
+    for row in identifiers:
+        identifier = row[0].split('/')[4]
+        query_eo_object(connection, identifier)
diff --git a/registrar_test.sh b/testing/registrar_test.sh
similarity index 76%
rename from registrar_test.sh
rename to testing/registrar_test.sh
index d394a2d1de2fe73b2157adab0136e08c2d2c7708..2a7696c10d22be88b4a6460099a01a4484f6b635 100755
--- a/registrar_test.sh
+++ b/testing/registrar_test.sh
@@ -2,9 +2,8 @@
 product_list_file=$1
 docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /var/www/pvs/dev/pvs_instance/manage.py storage create pvs_testing pvs_testing --type swift --storage-auth auth-cloud-ovh
 IFS=","
-while read product; do 
+while read product; do
     docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /registrar.py --objects-prefix $product <<<$product
-
-done < $product_list_file
+done < "$product_list_file"
 
-pytest -s registrar_test.py --name $product_list_file
+pytest #-s registrar_test.py --name $product_list_file
diff --git a/requirements.txt b/testing/requirements.txt
similarity index 81%
rename from requirements.txt
rename to testing/requirements.txt
index 56ba698664ddb78df20e57a7c6159d1da82feba9..43d82cd782b2b2d1884b394390eaf0abab8aaeac 100644
--- a/requirements.txt
+++ b/testing/requirements.txt
@@ -1,4 +1,5 @@
 pytest
 psycopg2
+python-dotenv
 # python-swiftclient
 # python-keystoneclient
\ No newline at end of file
diff --git a/wait_for_container.sh b/wait_for_container.sh
deleted file mode 100755
index 8a6b5f97f19a839f79fc748e98fdc6c48c0bef61..0000000000000000000000000000000000000000
--- a/wait_for_container.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-while [ -z $(docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /var/www/pvs/dev/pvs_instance/manage.py id list) ]; do
-    >&2 echo "Collection is not created yet - sleeping"
-    sleep 20
-    done
-
-while [ -z $(docker exec -i $(docker ps -qf "name=emg-pvs_registrar") python3 /var/www/pvs/dev/pvs_instance/manage.py id list) ]; do
-    >&2 echo "Collection is not created yet - sleeping"
-    sleep 20
-    done