diff --git a/.github/integration/scripts/make_db_credentials.sh b/.github/integration/scripts/make_db_credentials.sh index 4350f1d17..545319ffe 100644 --- a/.github/integration/scripts/make_db_credentials.sh +++ b/.github/integration/scripts/make_db_credentials.sh @@ -4,7 +4,7 @@ set -e apt-get -o DPkg::Lock::Timeout=60 update > /dev/null apt-get -o DPkg::Lock::Timeout=60 install -y postgresql-client >/dev/null -for n in api auth download finalize inbox ingest mapper sync verify; do +for n in api auth download finalize inbox ingest mapper rotatekey sync verify; do echo "creating credentials for: $n" psql -U postgres -h migrate -d sda -c "ALTER ROLE $n LOGIN PASSWORD '$n';" psql -U postgres -h postgres -d sda -c "ALTER ROLE $n LOGIN PASSWORD '$n';" diff --git a/.github/integration/scripts/make_sda_credentials.sh b/.github/integration/scripts/make_sda_credentials.sh index 9beaec98d..465fd827a 100644 --- a/.github/integration/scripts/make_sda_credentials.sh +++ b/.github/integration/scripts/make_sda_credentials.sh @@ -9,12 +9,12 @@ if [ -n "$PGSSLCERT" ]; then fi apt-get -o DPkg::Lock::Timeout=60 update > /dev/null -apt-get -o DPkg::Lock::Timeout=60 install -y curl jq openssh-client openssl postgresql-client >/dev/null +apt-get -o DPkg::Lock::Timeout=60 install -y curl jq openssh-client openssl postgresql-client xxd >/dev/null pip install --upgrade pip > /dev/null pip install aiohttp Authlib joserfc requests > /dev/null -for n in api auth download finalize inbox ingest mapper sync verify; do +for n in api auth download finalize inbox ingest mapper rotatekey sync verify; do echo "creating credentials for: $n" psql -U postgres -h postgres -d sda -c "ALTER ROLE $n LOGIN PASSWORD '$n';" psql -U postgres -h postgres -d sda -c "GRANT base TO $n;" @@ -106,11 +106,6 @@ if [ ! -f "/shared/c4gh.sec.pem" ]; then /shared/crypt4gh generate -n /shared/c4gh -p c4ghpass fi -if [ ! 
-f "/shared/c4gh1.sec.pem" ]; then - echo "creating crypth4gh key" - /shared/crypt4gh generate -n /shared/c4gh1 -p c4ghpass -fi - if [ ! -f "/shared/client.sec.pem" ]; then # client key for re-encryption echo "creating client crypth4gh key" /shared/crypt4gh generate -n /shared/client -p c4ghpass @@ -121,6 +116,22 @@ if [ ! -f "/shared/sync.sec.pem" ]; then /shared/crypt4gh generate -n /shared/sync -p syncPass fi +if [ ! -f "/shared/rotatekey.sec.pem" ]; then + echo "creating rotatekey crypth4gh key" + /shared/crypt4gh generate -n /shared/rotatekey -p rotatekeyPass +fi + +# register the rotation key in the db +resp=$(psql -U postgres -h postgres -d sda -At -c "SELECT description FROM sda.encryption_keys;") +if ! echo "$resp" | grep -q 'this is the new key to rotate to'; then + rotateKeyHash=$(cat /shared/rotatekey.pub.pem | awk 'NR==2' | base64 -d | xxd -p -c256) + resp=$(psql -U postgres -h postgres -d sda -At -c "INSERT INTO sda.encryption_keys(key_hash, description) VALUES('$rotateKeyHash', 'this is the new key to rotate to');") + if [ "$(echo "$resp" | tr -d '\n')" != "INSERT 0 1" ]; then + echo "insert keyhash failed" + exit 1 + fi +fi + if [ ! 
-f "/shared/keys/ssh" ]; then ssh-keygen -o -a 256 -t ed25519 -f /shared/keys/ssh -N "" pubKey="$(cat /shared/keys/ssh.pub)" diff --git a/.github/integration/sda-s3-integration.yml b/.github/integration/sda-s3-integration.yml index ab9da116d..bc852002d 100644 --- a/.github/integration/sda-s3-integration.yml +++ b/.github/integration/sda-s3-integration.yml @@ -11,7 +11,7 @@ services: condition: service_healthy environment: - PGPASSWORD=rootpasswd - image: python:3.11-slim + image: python:3.11-slim-bookworm volumes: - ./scripts:/scripts - shared:/shared @@ -253,6 +253,29 @@ services: - ./sda/config.yaml:/config.yaml - shared:/shared + rotatekey: + image: ghcr.io/neicnordic/sensitive-data-archive:PR${PR_NUMBER} + command: [sda-rotatekey] + container_name: rotatekey + depends_on: + credentials: + condition: service_completed_successfully + postgres: + condition: service_healthy + rabbitmq: + condition: service_healthy + environment: + - BROKER_PASSWORD=rotatekey + - BROKER_USER=rotatekey + - BROKER_QUEUE=rotatekey + - BROKER_ROUTINGKEY=rotatekey + - DB_PASSWORD=rotatekey + - DB_USER=rotatekey + restart: always + volumes: + - ./sda/config.yaml:/config.yaml + - shared:/shared + cega-nss: container_name: cega-nss depends_on: @@ -384,6 +407,8 @@ services: condition: service_started reencrypt: condition: service_started + rotatekey: + condition: service_started extra_hosts: - "localhost:host-gateway" environment: diff --git a/.github/integration/sda/config.yaml b/.github/integration/sda/config.yaml index e5473a023..82733287c 100644 --- a/.github/integration/sda/config.yaml +++ b/.github/integration/sda/config.yaml @@ -74,11 +74,12 @@ c4gh: filePath: /shared/c4gh.sec.pem passphrase: "c4ghpass" syncPubKeyPath: /shared/sync.pub.pem + rotatePubKeyPath: /shared/rotatekey.pub.pem privateKeys: - filePath: /shared/c4gh.sec.pem passphrase: "c4ghpass" - - filePath: /shared/c4gh1.sec.pem - passphrase: "c4ghpass" + - filePath: /shared/rotatekey.sec.pem + passphrase: "rotatekeyPass" 
oidc: id: XC56EL11xx diff --git a/.github/integration/sda/rbac.json b/.github/integration/sda/rbac.json index 515d7bb45..401f4bf8e 100644 --- a/.github/integration/sda/rbac.json +++ b/.github/integration/sda/rbac.json @@ -74,4 +74,4 @@ "rolebinding": "admin" } ] - } \ No newline at end of file + } diff --git a/.github/integration/tests/postgres/10_sanity_check.sh b/.github/integration/tests/postgres/10_sanity_check.sh index af7b270ba..076815266 100644 --- a/.github/integration/tests/postgres/10_sanity_check.sh +++ b/.github/integration/tests/postgres/10_sanity_check.sh @@ -22,7 +22,7 @@ if [ "$status" -eq 0 ]; then fi ## verify that migrations worked -migratedb=$(find /migratedb.d/ -name "*.sql" -printf '%f\n' | sort -n | tail -1 | cut -d '.' -f1) +migratedb=$(find /migratedb.d/ -name "*.sql" -printf '%f\n' | sort -n | tail -1 | cut -d '.' -f1 | cut -d '_' -f1) version=$(psql -U postgres -h migrate -d sda -At -c "select max(version) from sda.dbschema_version;") if [ "$version" -ne "$migratedb" ]; then echo "Migration scripts failed" diff --git a/.github/integration/tests/postgres/20_inbox_queries.sh b/.github/integration/tests/postgres/20_inbox_queries.sh index f2e8d51be..43f39c027 100644 --- a/.github/integration/tests/postgres/20_inbox_queries.sh +++ b/.github/integration/tests/postgres/20_inbox_queries.sh @@ -1,16 +1,16 @@ #!/bin/sh set -eou pipefail - +fileID="33d29907-c565-4a90-98b4-e31b992ab376" export PGPASSWORD=inbox for host in migrate postgres; do - fileID=$(psql -U inbox -h "$host" -d sda -At -c "SELECT sda.register_file('inbox/test-file.c4gh', 'test-user');") + fileID=$(psql -U inbox -h "$host" -d sda -At -c "SELECT sda.register_file('$fileID', 'inbox/test-file.c4gh', 'test-user');") if [ -z "$fileID" ]; then echo "register_file failed" exit 1 fi - newFileID=$(psql -U inbox -h "$host" -d sda -At -c "SELECT sda.register_file('inbox/test-file.c4gh', 'other-user');") + newFileID=$(psql -U inbox -h "$host" -d sda -At -c "SELECT sda.register_file(null, 
'inbox/test-file.c4gh', 'other-user');") if [ -z "$newFileID" ]; then echo "register_file failed" exit 1 diff --git a/.github/integration/tests/postgres/30_ingest_queries.sh b/.github/integration/tests/postgres/30_ingest_queries.sh index 283ae557b..8801bf31d 100644 --- a/.github/integration/tests/postgres/30_ingest_queries.sh +++ b/.github/integration/tests/postgres/30_ingest_queries.sh @@ -3,17 +3,17 @@ set -eou pipefail export PGPASSWORD=ingest user="test-user" -corrID="33d29907-c565-4a90-98b4-e31b992ab376" +fileID="33d29907-c565-4a90-98b4-e31b992ab376" for host in migrate postgres; do ## insert file - fileID=$(psql -U ingest -h "$host" -d sda -At -c "SELECT sda.register_file('inbox/test-file.c4gh', '$user');") + fileID=$(psql -U ingest -h "$host" -d sda -At -c "SELECT sda.register_file('$fileID', 'inbox/test-file.c4gh', '$user');") if [ -z "$fileID" ]; then echo "register_file failed" exit 1 fi - resp=$(psql -U ingest -h "$host" -d sda -At -c "INSERT INTO sda.file_event_log(file_id, event, correlation_id, user_id, message) VALUES('$fileID', 'submitted', '$corrID', '$user', '{}');") + resp=$(psql -U ingest -h "$host" -d sda -At -c "INSERT INTO sda.file_event_log(file_id, event, user_id, message) VALUES('$fileID', 'submitted', '$user', '{}');") if [ "$(echo "$resp" | tr -d '\n')" != "INSERT 0 1" ]; then echo "insert file failed" exit 1 @@ -30,11 +30,23 @@ for host in migrate postgres; do archive_path=d853c51b-6aed-4243-b427-177f5e588857 size="2035150" checksum="f03775a50feea74c579d459fdbeb27adafd543b87f6692703543a6ebe7daa1ff" - resp=$(psql -U ingest -h "$host" -d sda -At -c "SELECT sda.set_archived('$fileID', '$corrID', '$archive_path', '$size', '$checksum', 'SHA256');") - if [ "$resp" != "" ]; then - echo "mark file archived failed" + + resp=$(psql -U ingest -h "$host" -d sda -At -c "UPDATE sda.files SET archive_file_path = '$archive_path', archive_file_size = '$size' WHERE id = '$fileID';") + if [ "$resp" != "UPDATE 1" ]; then + echo "update of 
files.archive_file_path, archive_file_size failed: $resp" + exit 1 + fi + resp=$(psql -U ingest -h "$host" -d sda -At -c "INSERT INTO sda.checksums(file_id, checksum, type, source) VALUES('$fileID', '$checksum', upper('SHA256')::sda.checksum_algorithm, upper('UPLOADED')::sda.checksum_source);") + if [ "$(echo "$resp" | tr -d '\n')" != "INSERT 0 1" ]; then + echo "insert of archived checksum failed: $resp" exit 1 fi + resp=$(psql -U ingest -h "$host" -d sda -At -c "INSERT INTO sda.file_event_log(file_id, event) VALUES('$fileID', 'archived');") + if [ "$(echo "$resp" | tr -d '\n')" != "INSERT 0 1" ]; then + echo "insert of file_event_log failed: $resp" + exit 1 + fi + done echo "30_ingest_queries completed successfully" diff --git a/.github/integration/tests/postgres/40_verify_queries.sh b/.github/integration/tests/postgres/40_verify_queries.sh index 4b3653aaa..99e87b53c 100644 --- a/.github/integration/tests/postgres/40_verify_queries.sh +++ b/.github/integration/tests/postgres/40_verify_queries.sh @@ -2,15 +2,14 @@ set -eou pipefail export PGPASSWORD=verify -corrID="33d29907-c565-4a90-98b4-e31b992ab376" +fileID="33d29907-c565-4a90-98b4-e31b992ab376" for host in migrate postgres; do - fileID=$(psql -U verify -h "$host" -d sda -At -c "SELECT DISTINCT file_id from sda.file_event_log WHERE correlation_id = '$corrID';") ## get file status - status=$(psql -U verify -h "$host" -d sda -At -c "SELECT event from sda.file_event_log WHERE correlation_id = '$corrID' ORDER BY id DESC LIMIT 1;") + status=$(psql -U verify -h "$host" -d sda -At -c "SELECT event from sda.file_event_log WHERE file_id = '$fileID' ORDER BY id DESC LIMIT 1;") if [ "$status" = "" ]; then - echo "get file status failed" + echo "get file status failed: $resp" exit 1 fi @@ -18,7 +17,7 @@ for host in migrate postgres; do header="637279707434676801000000010000006c00000000000000" dbheader=$(psql -U verify -h "$host" -d sda -At -c "SELECT header from sda.files WHERE id = '$fileID';") if [ "$dbheader" != 
"$header" ]; then - echo "wrong header received" + echo "wrong header received: $resp" exit 1 fi @@ -26,11 +25,32 @@ for host in migrate postgres; do archive_checksum="64e56b0d245b819c116b5f1ad296632019490b57eeaebb419a5317e24a153852" decrypted_size="2034254" decrypted_checksum="febee6829a05772eea93c647e38bf5cc5bf33d1bcd0ea7d7bdd03225d84d2553" - resp=$(psql -U verify -h "$host" -d sda -At -c "SELECT sda.set_verified('$fileID', '$corrID', '$archive_checksum', 'SHA256', '$decrypted_size', '$decrypted_checksum', 'SHA256')") - if [ "$resp" != "" ]; then - echo "set_verified failed" + + resp=$(psql -U verify -h "$host" -d sda -At -c "UPDATE sda.files SET decrypted_file_size = '$decrypted_size' WHERE id = '$fileID';") + if [ "$resp" != "UPDATE 1" ]; then + echo "update of files.decrypted_file_size failed: $resp" + exit 1 + fi + + resp=$(psql -U verify -h "$host" -d sda -At -c "INSERT INTO sda.checksums(file_id, checksum, type, source) VALUES('$fileID', '$archive_checksum', upper('SHA256')::sda.checksum_algorithm, upper('ARCHIVED')::sda.checksum_source);") + if [ "$(echo "$resp" | tr -d '\n')" != "INSERT 0 1" ]; then + echo "insert of archived checksum failed: $resp" + exit 1 + fi + + resp=$(psql -U verify -h "$host" -d sda -At -c "INSERT INTO sda.checksums(file_id, checksum, type, source) VALUES('$fileID', '$decrypted_checksum', upper('SHA256')::sda.checksum_algorithm, upper('UNENCRYPTED')::sda.checksum_source);") + if [ "$(echo "$resp" | tr -d '\n')" != "INSERT 0 1" ]; then + echo "insert of decrypted checksum failed: $resp" + exit 1 + fi + + resp=$(psql -U verify -h "$host" -d sda -At -c "INSERT INTO sda.file_event_log(file_id, event) VALUES('$fileID', 'verified');") + if [ "$(echo "$resp" | tr -d '\n')" != "INSERT 0 1" ]; then + echo "insert of file_event_log failed: $resp" exit 1 fi + + done echo "40_verify_queries completed successfully" diff --git a/.github/integration/tests/sda/21_cancel_test.sh b/.github/integration/tests/sda/21_cancel_test.sh index 
c7482bdba..f67dffe08 100644 --- a/.github/integration/tests/sda/21_cancel_test.sh +++ b/.github/integration/tests/sda/21_cancel_test.sh @@ -55,7 +55,7 @@ curl -k -u guest:guest "http://rabbitmq:15672/api/exchanges/sda/sda/publish" \ -d "$cancel_body" | jq # check database to verify file status -if [ "$(psql -U postgres -h postgres -d sda -At -c "select event from sda.file_event_log where correlation_id = '$CORRID' order by id DESC LIMIT 1")" != "disabled" ]; then +if [ "$(psql -U postgres -h postgres -d sda -At -c "select event from sda.file_event_log where file_id = '$CORRID' order by id DESC LIMIT 1")" != "disabled" ]; then echo "canceling file failed" exit 1 fi diff --git a/.github/integration/tests/sda/22_error_test.sh b/.github/integration/tests/sda/22_error_test.sh index ea79144e8..5ea7ae946 100644 --- a/.github/integration/tests/sda/22_error_test.sh +++ b/.github/integration/tests/sda/22_error_test.sh @@ -71,7 +71,7 @@ curl -k -u guest:guest "$URI/api/exchanges/sda/sda/publish" \ -d "$ingest_body" | jq # check database to verify file status -until [ "$(psql -U postgres -h postgres -d sda -At -c "SELECT event FROM sda.file_event_log WHERE correlation_id = '$CORRID' ORDER BY ID DESC LIMIT 1;")" = "error" ]; do +until [ "$(psql -U postgres -h postgres -d sda -At -c "SELECT event FROM sda.file_event_log WHERE file_id = '$CORRID' ORDER BY ID DESC LIMIT 1;")" = "error" ]; do echo "waiting for file error to be logged by ingest" RETRY_TIMES=$((RETRY_TIMES + 1)) if [ "$RETRY_TIMES" -eq 30 ]; then @@ -83,7 +83,7 @@ done ## give the file a non existing archive path psql -U postgres -h postgres -d sda -Atq -c "UPDATE sda.files SET archive_file_path = '$CORRID', header = '637279707434676801000000010000006c00000000000000' WHERE id = '$CORRID';" -psql -U postgres -h postgres -d sda -Atq -c "INSERT INTO sda.file_event_log(file_id, correlation_id, event) VALUES('$CORRID', '$CORRID', 'archived');" +psql -U postgres -h postgres -d sda -Atq -c "INSERT INTO 
sda.file_event_log(file_id, event) VALUES('$CORRID', 'archived');" encrypted_checksums=$( jq -c -n \ @@ -119,7 +119,7 @@ curl -k -u guest:guest "$URI/api/exchanges/sda/sda/publish" \ # check database to verify file status RETRY_TIMES=0 -until [ "$(psql -U postgres -h postgres -d sda -At -c "SELECT event FROM sda.file_event_log WHERE correlation_id = '$CORRID' ORDER BY ID DESC LIMIT 1;")" = "error" ]; do +until [ "$(psql -U postgres -h postgres -d sda -At -c "SELECT event FROM sda.file_event_log WHERE file_id = '$CORRID' ORDER BY ID DESC LIMIT 1;")" = "error" ]; do echo "waiting for file error to be logged by verify" date RETRY_TIMES=$((RETRY_TIMES + 1)) diff --git a/.github/integration/tests/sda/31_cancel_test2.sh b/.github/integration/tests/sda/31_cancel_test2.sh index 0095a0687..8b6101c34 100644 --- a/.github/integration/tests/sda/31_cancel_test2.sh +++ b/.github/integration/tests/sda/31_cancel_test2.sh @@ -7,13 +7,13 @@ ENC_SHA=$(sha256sum NA12878.bam.c4gh | cut -d' ' -f 1) ENC_MD5=$(md5sum NA12878.bam.c4gh | cut -d' ' -f 1) ## get correlation id from message -CORRID=$(psql -U postgres -h postgres -d sda -At -c "select id from sda.files where submission_file_path = 'NA12878.bam.c4gh';") +FILEID=$(psql -U postgres -h postgres -d sda -At -c "select id from sda.files where submission_file_path = 'NA12878.bam.c4gh';") properties=$( jq -c -n \ --argjson delivery_mode 2 \ - --arg correlation_id "$CORRID" \ + --arg correlation_id "$FILEID" \ --arg content_encoding UTF-8 \ --arg content_type application/json \ '$ARGS.named' @@ -52,7 +52,7 @@ curl -k -u guest:guest "http://rabbitmq:15672/api/exchanges/sda/sda/publish" \ # check database to verify file status RETRY_TIMES=0 -until [ "$(psql -U postgres -h postgres -d sda -At -c "select event from sda.file_event_log where correlation_id = '$CORRID' order by id DESC LIMIT 1;")" = "disabled" ]; do +until [ "$(psql -U postgres -h postgres -d sda -At -c "select event from sda.file_event_log where file_id = '$FILEID' order by id 
DESC LIMIT 1;")" = "disabled" ]; do echo "canceling file failed" RETRY_TIMES=$((RETRY_TIMES + 1)) if [ "$RETRY_TIMES" -eq 30 ]; then @@ -132,7 +132,7 @@ curl -s -u guest:guest "http://rabbitmq:15672/api/exchanges/sda/sda/publish" \ -d "$accession_body" | jq RETRY_TIMES=0 -until [ "$(psql -U postgres -h postgres -d sda -At -c "select event from sda.file_event_log where correlation_id = '$CORRID' order by id DESC LIMIT 1")" = "ready" ]; do +until [ "$(psql -U postgres -h postgres -d sda -At -c "select event from sda.file_event_log where file_id = '$FILEID' order by id DESC LIMIT 1")" = "ready" ]; do echo "waiting for re-ingested file to become ready" RETRY_TIMES=$((RETRY_TIMES + 1)) if [ "$RETRY_TIMES" -eq 30 ]; then diff --git a/.github/integration/tests/sda/45_sync_test.sh b/.github/integration/tests/sda/45_sync_test.sh index 5dc6d1221..f4fe2d56d 100644 --- a/.github/integration/tests/sda/45_sync_test.sh +++ b/.github/integration/tests/sda/45_sync_test.sh @@ -11,7 +11,7 @@ fi # check bucket for synced files for file in NA12878.bai NA12878_20k_b37.bai; do RETRY_TIMES=0 - until [ "$(s3cmd -c direct ls s3://sync/test_dummy.org/"$file")" != "" ]; do + until [ "$(s3cmd -c direct ls s3://sync/"$file")" != "" ]; do RETRY_TIMES=$((RETRY_TIMES + 1)) if [ "$RETRY_TIMES" -eq 30 ]; then echo "::error::Time out while waiting for files to be synced" diff --git a/.github/integration/tests/sda/60_api_admin_test.sh b/.github/integration/tests/sda/60_api_admin_test.sh index 2094287a1..d4340c05e 100644 --- a/.github/integration/tests/sda/60_api_admin_test.sh +++ b/.github/integration/tests/sda/60_api_admin_test.sh @@ -147,6 +147,35 @@ until [ "$(psql -U postgres -h postgres -d sda -At -c "select id from sda.file_e sleep 2 done +# Finalize file +echo "Giving accession id by using json payload" +accession_payload=$( +jq -c -n \ + --arg filepath "NE12878.bam.c4gh" \ + --arg user "test@dummy.org" \ + --arg accession_id "my-id-01" \ + '$ARGS.named' +) + +resp_accession_payload="$(curl -s 
-k -L -o /dev/null -w "%{http_code}\n" -H "Authorization: Bearer $token" -H "Content-Type: application/json" -X POST -d "$accession_payload" "http://api:8080/file/accession")" +if [ "$resp_accession_payload" != "200" ]; then + echo "Error when requesting to ingesting file, expected 200 got: $resp_accession_payload" + exit 1 +fi + +# Check that the file has been finalized +RETRY_TIMES=0 +until [ "$(psql -U postgres -h postgres -d sda -At -c "SELECT event FROM sda.file_event_log WHERE file_id='$fileid' order by started_at desc limit 1;")" = "ready" ]; do + echo "waiting for finalize to complete" + RETRY_TIMES=$((RETRY_TIMES + 1)) + if [ "$RETRY_TIMES" -eq 10 ]; then + echo "::error::Time out while waiting for finalizing to complete" + exit 1 + fi + sleep 2 +done +echo "Finalize by using json payload finished successfully" + # Try to delete file not in inbox fileid="$(curl -k -L -H "Authorization: Bearer $token" "http://api:8080/users/test@dummy.org/files" | jq -r '.[] | select(.inboxPath == "NE12878.bam.c4gh") | .fileID')" resp="$(curl -s -k -L -o /dev/null -w "%{http_code}\n" -H "Authorization: Bearer $token" -X DELETE "http://api:8080/file/test@dummy.org/$fileid")" @@ -216,4 +245,51 @@ if [ "$resp" != "404" ]; then exit 1 fi -echo "API admin tests completed successfully" \ No newline at end of file +# Test ingesting file by using the file id +echo "Ingest file by using file ID" +# Reupload a file under a different name +s3cmd -c s3cfg put NA12878.bam.c4gh s3://test_dummy.org/ingest/NB12878-ingest.bam.c4gh +sleep 3 +# Find the file id of the uploaded file +new_fileid="$(curl -k -L -H "Authorization: Bearer $token" "http://api:8080/users/test@dummy.org/files" | jq -r '.[] | select(.inboxPath == "ingest/NB12878-ingest.bam.c4gh") | .fileID')" +# ingest the file +ingest_resp="$(curl -s -k -L -o /dev/null -w "%{http_code}\n" -H "Authorization: Bearer $token" -X POST "http://api:8080/file/ingest?fileid=$new_fileid")" +if [ "$ingest_resp" != "200" ]; then + echo "Error 
when requesting to ingesting file by the use of file id, expected 200 got: $ingest_resp" + exit 1 +fi +# Check that the file is ingested and verified +RETRY_TIMES=0 +until [ "$(psql -U postgres -h postgres -d sda -At -c "SELECT event FROM sda.file_event_log WHERE file_id='$new_fileid' order by started_at desc limit 1;")" = "verified" ]; do + echo "waiting for verified to complete" + RETRY_TIMES=$((RETRY_TIMES + 1)) + if [ "$RETRY_TIMES" -eq 10 ]; then + echo "::error::Time out while waiting for verified to complete" + exit 1 + fi + sleep 2 +done +echo "Ingestion by using file ID finished successfully" + +# Test giving accession id to a file by using file id +echo "Giving accession id by using file id" +# The file which ingested above will be used +accession_resp="$(curl -s -k -L -o /dev/null -w "%{http_code}\n" -H "Authorization: Bearer $token" -X POST "http://api:8080/file/accession?fileid=$new_fileid&accessionid=SDA-123-asd")" +if [ "$accession_resp" != "200" ]; then + echo "Error when requesting to finalize file by the use of file id, expected 200 got: $accession_resp" + exit 1 +fi +# Check that the file has been finalized +RETRY_TIMES=0 +until [ "$(psql -U postgres -h postgres -d sda -At -c "SELECT event FROM sda.file_event_log WHERE file_id='$new_fileid' order by started_at desc limit 1;")" = "ready" ]; do + echo "waiting for finalize to complete" + RETRY_TIMES=$((RETRY_TIMES + 1)) + if [ "$RETRY_TIMES" -eq 10 ]; then + echo "::error::Time out while waiting for finalizing to complete" + exit 1 + fi + sleep 2 +done +echo "Finalize by using file ID finished successfully" + +echo "API admin tests completed successfully" diff --git a/.github/integration/tests/sda/70_rotate_key_test.sh b/.github/integration/tests/sda/70_rotate_key_test.sh new file mode 100644 index 000000000..c952d4b6f --- /dev/null +++ b/.github/integration/tests/sda/70_rotate_key_test.sh @@ -0,0 +1,288 @@ +#!/bin/sh +set -e + +if [ -n "$SYNCTEST" ]; then + exit 0 +fi + +cd shared || true + 
+checkErrors() { + RETRY_TIMES=0 + until [ $(("$(curl -su guest:guest http://rabbitmq:15672/api/queues/sda/error_stream/ | jq -r '.messages_ready')"-"$errorStreamSize")) -eq 1 ]; do + echo "checking for $1 error" + RETRY_TIMES=$((RETRY_TIMES + 1)) + if [ "$RETRY_TIMES" -eq 20 ]; then + echo "::error::Time out while waiting for error message" + exit 1 + fi + sleep 2 + done +} + +checkConsumers() { + RETRY_TIMES=0 + until [ "$(curl -su guest:guest http://localhost:15672/api/consumers | jq '.[].queue.name' | grep -c "$1")" -eq "$2" ]; do + echo "waiting for $1 consumer status" + RETRY_TIMES=$((RETRY_TIMES + 1)) + if [ "$RETRY_TIMES" -eq 30 ]; then + echo "::error::Time out while waiting for $1 consumer status" + exit 1 + fi + sleep 2 + done +} + +# cleanup queues and database +URI=http://rabbitmq:15672 +if [ -n "$PGSSLCERT" ]; then + URI=https://rabbitmq:15671 +fi +for q in accession archived backup completed inbox ingest mappings verified rotatekey; do + curl -s -k -u guest:guest -X DELETE "$URI/api/queues/sda/$q/contents" +done +psql -U postgres -h postgres -d sda -At -c "TRUNCATE TABLE sda.files, sda.encryption_keys CASCADE;" + +# register archive and rotation c4gh public keys +token="$(cat /shared/token)" +for keyName in c4gh rotatekey; do + payload=$( + jq -c -n \ + --arg description "this is the $keyName key" \ + --arg pubkey "$( base64 -w0 /shared/"$keyName".pub.pem)" \ + '$ARGS.named' + ) + resp="$(curl -s -k -L -o /dev/null -w "%{http_code}\n" -H "Authorization: Bearer $token" -H "Content-Type: application/json" -X POST -d "$payload" "http://api:8080/c4gh-keys/add")" + if [ "$resp" != "200" ]; then + echo "Error when adding the $keyName public key hash, expected 200 got: $resp" + exit 1 + fi +done + +# generate and upload file +file=testfile1 +if [ ! -f "$file" ]; then + dd if=/dev/urandom of="$file" count=10 bs=1M +fi +if [ ! 
-f "$file.c4gh" ]; then + yes | /shared/crypt4gh encrypt -p c4gh.pub.pem -f "$file" +fi +s3cmd -c s3cfg put "$file.c4gh" s3://test_dummy.org/dataset_rotatekey/ + +response="$(curl -s -k -L "http://api:8080/users/test@dummy.org/files" -H "Authorization: Bearer $token" | jq | grep -c dataset_rotatekey)" +if [ "$response" -ne 1 ]; then + echo "file for rotatekey test failed to upload" + exit 1 +fi + +## ingest file +curl -s -k -H "Authorization: Bearer $token" -H "Content-Type: application/json" -X POST -d '{"filepath": "dataset_rotatekey/testfile1.c4gh", "user": "test@dummy.org"}' http://api:8080/file/ingest +RETRY_TIMES=0 +until [ "$(curl -s -k -H "Authorization: Bearer $token" -X GET http://api:8080/users/test@dummy.org/files | jq | grep -c "verified")" -eq 1 ]; do + echo "waiting for files to become verified" + RETRY_TIMES=$((RETRY_TIMES + 1)) + if [ "$RETRY_TIMES" -eq 30 ]; then + echo "::error::Time out while waiting for files to become verified" + exit 1 + fi + sleep 2 +done + +errorStreamSize=$(curl -su guest:guest http://rabbitmq:15672/api/queues/sda/error_stream/ | jq -r '.messages_ready') + +## trigger key rotation +corrID=$( + curl -s -X POST \ + -H "content-type:application/json" \ + -u guest:guest http://rabbitmq:15672/api/queues/sda/inbox/get \ + -d '{"count":1,"encoding":"auto","ackmode":"ack_requeue_false"}' | jq -r .[0].properties.correlation_id + ) +fileID=$(psql -U postgres -h postgres -d sda -At -c "select id from sda.files where submission_file_path='dataset_rotatekey/testfile1.c4gh';") + +properties=$( + jq -c -n \ + --argjson delivery_mode 2 \ + --arg correlation_id "$corrID" \ + --arg content_encoding UTF-8 \ + --arg content_type application/json \ + '$ARGS.named' +) + +rotatekey_payload=$( + jq -r -c -n \ + --arg type "key_rotation" \ + --arg file_id "$fileID" \ + '$ARGS.named|@base64' +) + +rotatekey_body=$( + jq -c -n \ + --arg vhost test \ + --arg name sda \ + --argjson properties "$properties" \ + --arg routing_key "rotatekey" \ + --arg 
payload_encoding base64 \ + --arg payload "$rotatekey_payload" \ + '$ARGS.named' +) + +curl -s -u guest:guest "http://rabbitmq:15672/api/exchanges/sda/sda/publish" \ + -H 'Content-Type: application/json;charset=UTF-8' \ + -d "$rotatekey_body" | jq + +# check DB for updated key hash in sda.files +rotatekeyHash=$(psql -U postgres -h postgres -d sda -At -c "select key_hash from sda.encryption_keys where description='this is the rotatekey key';") +if [ "$(psql -U postgres -h postgres -d sda -At -c "select key_hash from sda.files where id='$fileID';" | grep -c "$rotatekeyHash")" -ne 1 ]; +then + echo "failed to update the key hash of files" + exit 1 +fi + +# check that files were re-verified +echo "waiting for re-verify to complete" +RETRY_TIMES=0 +until [ "$(curl -su guest:guest http://rabbitmq:15672/api/queues/sda/archived/ | jq -r '.messages_ready')" -eq 0 ]; do + echo "waiting for re-verify to complete" + RETRY_TIMES=$((RETRY_TIMES + 1)) + if [ "$RETRY_TIMES" -eq 30 ]; then + echo "::error::Time out while waiting for verify to complete" + exit 1 + fi + sleep 2 +done + +# check that no other erros occured +sleep 5 +if [ "$(curl -su guest:guest http://rabbitmq:15672/api/queues/sda/error_stream/ | jq -r '.messages_ready')" -ne "$errorStreamSize" ]; then + echo "something went wrong with the key rotation" + exit 1 +fi + +## download file with rotated key, concatenate header and archive body, decrypt and check + +# get rotated header +psql -U postgres -h postgres -d sda -At -c "select header from sda.files where id='$fileID';" | xxd -r -p > testfile1_rotated.c4gh + +# get archive file +archivePath=$(psql -U postgres -h postgres -d sda -At -c "select archive_file_path from sda.files where id='$fileID';") +s3cmd --access_key=access --secret_key=secretKey --host=minio:9000 --no-ssl --host-bucket=minio:9000 get s3://archive/"$archivePath" --force + +# concatenate and decrypt +cat testfile1_rotated.c4gh "$archivePath" > tmp_file && mv tmp_file testfile1_rotated.c4gh 
+C4GH_PASSPHRASE=rotatekeyPass ./crypt4gh decrypt -f testfile1_rotated.c4gh -s rotatekey.sec.pem + +# check that decrypted file matches the original +if [ ! -f "testfile1_rotated" ]; then + echo "decrypted file testfile1_rotated not found" + exit 1 +fi +if ! cmp -s "testfile1_rotated" "testfile1" ; then + echo "downloaded file is different from the original one" + exit 1 +fi +# compare hashes as well +if [ "$(sha256sum testfile1 | cut -d ' ' -f 1)" != "$(sha256sum testfile1_rotated | cut -d ' ' -f 1)" ]; then + echo "downloaded file has different sha256 hash from the original one" + exit 1 +fi + +### test for errors ### + +## test rotation key is deprecated during runtime +echo "test rotation key is deprecated during runtime" + +rotateKeyHash=$(cat /shared/rotatekey.pub.pem | awk 'NR==2' | base64 -d | xxd -p -c256) +resp="$(curl -s -k -L -o /dev/null -w "%{http_code}\n" -H "Authorization: Bearer $token" -H "Content-Type: application/json" -X POST "http://api:8080/c4gh-keys/deprecate/$rotateKeyHash")" +if [ "$resp" != "200" ]; then + echo "Error when trying to deprecate rotation public key hash, expected 200 got: $resp" + exit 1 +fi + +rotatekey_body=$( + jq -c -n \ + --arg vhost test \ + --arg name sda \ + --argjson properties "$properties" \ + --arg routing_key "rotatekey" \ + --arg payload_encoding base64 \ + --arg payload "$rotatekey_payload" \ + '$ARGS.named' +) + +curl -s -u guest:guest "http://rabbitmq:15672/api/exchanges/sda/sda/publish" \ + -H 'Content-Type: application/json;charset=UTF-8' \ + -d "$rotatekey_body" | jq + +# check that app failed +checkConsumers rotatekey 0 + +## test app attempts to start with a configured rotation key that is deprecated +echo "test that app fails to start with a configured rotation key that is invalid" + +sleep 2 +# app will keep failing until we restore tha target key as active +checkConsumers rotatekey 0 +deprecationDate=$(psql -U postgres -h postgres -d sda -At -c "select deprecated_at from sda.encryption_keys where 
deprecated_at is not null;") +psql -U postgres -h postgres -d sda -At -c "UPDATE sda.encryption_keys SET deprecated_at = null WHERE deprecated_at = '$deprecationDate';" + +# check that app recovered when it found a valid target key +checkConsumers rotatekey 1 + +## test bad message +echo "test bad mq message" + +rotatekey_payload_bad=$( + jq -r -c -n \ + --arg type "key_rotation" \ + --arg file_id "0f38b6z-9868-446f-91ab-6a83832a3f0a" \ + '$ARGS.named|@base64' +) + +rotatekey_body=$( + jq -c -n \ + --arg vhost test \ + --arg name sda \ + --argjson properties "$properties" \ + --arg routing_key "rotatekey" \ + --arg payload_encoding base64 \ + --arg payload "$rotatekey_payload_bad" \ + '$ARGS.named' +) + +curl -s -u guest:guest "http://rabbitmq:15672/api/exchanges/sda/sda/publish" \ + -H 'Content-Type: application/json;charset=UTF-8' \ + -d "$rotatekey_body" | jq + +checkErrors "validation of incoming message (rotate-key) failed" + +# update errorStream +errorStreamSize=$(curl -su guest:guest http://rabbitmq:15672/api/queues/sda/error_stream/ | jq -r '.messages_ready') + +## test non-existent fileID +echo "test non-existent fileID" + +rotatekey_payload_bad=$( + jq -r -c -n \ + --arg type "key_rotation" \ + --arg file_id "d3fc4148-6918-479c-914d-ad669041c816" \ + '$ARGS.named|@base64' +) + +rotatekey_body=$( + jq -c -n \ + --arg vhost test \ + --arg name sda \ + --argjson properties "$properties" \ + --arg routing_key "rotatekey" \ + --arg payload_encoding base64 \ + --arg payload "$rotatekey_payload_bad" \ + '$ARGS.named' +) + +curl -s -u guest:guest "http://rabbitmq:15672/api/exchanges/sda/sda/publish" \ + -H 'Content-Type: application/json;charset=UTF-8' \ + -d "$rotatekey_body" | jq + +checkErrors "failed to get keyhash for file" + +printf "\033[32mRotate key integration tests completed successfully\033[0m\n" diff --git a/.github/integration/tests/sda/92_handle_file_errors.sh b/.github/integration/tests/sda/92_handle_file_errors.sh index 7316d1bac..74685836d 
100644 --- a/.github/integration/tests/sda/92_handle_file_errors.sh +++ b/.github/integration/tests/sda/92_handle_file_errors.sh @@ -81,8 +81,8 @@ missing_file_payload=$( '$ARGS.named|@base64' ) -FILEID=$(psql -U postgres -h postgres -d sda -At -c "SELECT DISTINCT(file_id) FROM sda.file_event_log WHERE correlation_id = '$CORRID';") -psql -U postgres -h postgres -d sda -At -c "INSERT INTO sda.file_event_log(file_id, event, correlation_id, user_id, message) VALUES('$FILEID', 'uploaded', '$CORRID', 'test@dummy.org', '{\"uploaded\": \"message\"}');" +FILEID=$(psql -U postgres -h postgres -d sda -At -c "SELECT DISTINCT(file_id) FROM sda.file_event_log WHERE file_id = '$CORRID';") +psql -U postgres -h postgres -d sda -At -c "INSERT INTO sda.file_event_log(file_id, event, user_id, message) VALUES('$FILEID', 'uploaded', 'test@dummy.org', '{\"uploaded\": \"message\"}');" properties=$( jq -c -n \ diff --git a/.github/workflows/build_pr_container.yaml b/.github/workflows/build_pr_container.yaml index 97e738f9a..c5b847ab2 100644 --- a/.github/workflows/build_pr_container.yaml +++ b/.github/workflows/build_pr_container.yaml @@ -31,7 +31,7 @@ jobs: string: ${{ github.repository }} - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Log in to the Github Container registry uses: docker/login-action@v3 @@ -81,7 +81,7 @@ jobs: string: ${{ github.repository }} - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Log in to the Github Container registry uses: docker/login-action@v3 @@ -129,7 +129,7 @@ jobs: output: 'postgres-results.sarif' severity: "CRITICAL,HIGH" - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@v4 with: sarif_file: 'postgres-results.sarif' category: postgres @@ -147,7 +147,7 @@ jobs: output: 'rabbitmq-results.sarif' severity: "CRITICAL,HIGH" - name: Upload Trivy scan results to GitHub Security tab - uses: 
github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@v4 with: sarif_file: 'rabbitmq-results.sarif' category: rabbitmq @@ -167,7 +167,7 @@ jobs: string: ${{ github.repository }} - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Log in to the Github Container registry uses: docker/login-action@v3 @@ -202,7 +202,7 @@ jobs: output: 'inbox-results.sarif' severity: "CRITICAL,HIGH" - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@v4 with: sarif_file: 'inbox-results.sarif' category: sftp-inbox @@ -247,7 +247,7 @@ jobs: string: ${{ github.repository }} - name: Check out code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Test rabbitmq federation run: docker compose -f .github/integration/rabbitmq-federation.yml run federation_test @@ -265,7 +265,7 @@ jobs: string: ${{ github.repository }} - name: Check out code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Test postgres run: docker compose -f .github/integration/postgres.yml run tests @@ -286,7 +286,7 @@ jobs: string: ${{ github.repository }} - name: Check out code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Test sensitive-data-archive run: docker compose -f .github/integration/sda-${{matrix.storage}}-integration.yml run integration_test @@ -297,7 +297,7 @@ jobs: - build_server_images runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: dorny/paths-filter@v3 id: changes with: @@ -320,7 +320,7 @@ jobs: storage: [s3, posix] steps: - name: Check out code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Test sda-doa for ${{ matrix.storage }} storage run: docker compose -f .github/integration/sda-doa-${{ matrix.storage }}-outbox.yml run integration_test @@ -352,7 +352,7 @@ jobs: string: ${{ github.repository }} - name: Checkout - uses: actions/checkout@v5 + 
uses: actions/checkout@v6 - name: Install Helm uses: azure/setup-helm@v4 diff --git a/.github/workflows/code-linter.yaml b/.github/workflows/code-linter.yaml index 03d87c40b..93bf2ffa2 100644 --- a/.github/workflows/code-linter.yaml +++ b/.github/workflows/code-linter.yaml @@ -18,7 +18,7 @@ jobs: sda: ${{ steps.changes.outputs.sda }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: dorny/paths-filter@v3 id: changes with: @@ -43,10 +43,10 @@ jobs: id: go - name: Check out code into the Go module directory - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Run golangci-lint - uses: golangci/golangci-lint-action@v8.0.0 + uses: golangci/golangci-lint-action@v9.1.0 with: args: --timeout 5m working-directory: sda-download @@ -64,10 +64,10 @@ jobs: id: go - name: Check out code into the Go module directory - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Run golangci-lint - uses: golangci/golangci-lint-action@v8.0.0 + uses: golangci/golangci-lint-action@v9.1.0 with: args: --timeout 5m working-directory: sda @@ -85,10 +85,10 @@ jobs: id: go - name: Check out code into the Go module directory - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Run golangci-lint - uses: golangci/golangci-lint-action@v8.0.0 + uses: golangci/golangci-lint-action@v9.1.0 with: args: --timeout 5m working-directory: sda-admin diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d2cd99f94..059043c67 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -25,11 +25,11 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v5 + uses: actions/checkout@v6 # Initializes the CodeQL tools for scanning. 
- name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: languages: ${{ matrix.language }} @@ -57,9 +57,9 @@ jobs: - name: Autobuild if: ${{ matrix.language == 'go' }} - uses: github/codeql-action/autobuild@v3 + uses: github/codeql-action/autobuild@v4 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/functionality.yml b/.github/workflows/functionality.yml index 7bd95a029..dbc9b3097 100644 --- a/.github/workflows/functionality.yml +++ b/.github/workflows/functionality.yml @@ -11,7 +11,7 @@ jobs: sftp-inbox: ${{ steps.changes.outputs.sftp-inbox }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: dorny/paths-filter@v3 id: changes with: @@ -40,7 +40,7 @@ jobs: python-version: "3.11" - name: Check out code into the Go module directory - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Run setup scripts run: | @@ -64,7 +64,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Build image run: | diff --git a/.github/workflows/ghcr-actions.yml b/.github/workflows/ghcr-actions.yml index 8cabfb012..3a3d30431 100644 --- a/.github/workflows/ghcr-actions.yml +++ b/.github/workflows/ghcr-actions.yml @@ -18,7 +18,7 @@ jobs: packages: write steps: - name: Delete 'PR' containers older than one month - uses: snok/container-retention-policy@v3.0.0 + uses: snok/container-retention-policy@v3.0.1 with: account: ${{ github.repository_owner }} image-names: sensitive-data-archive diff --git a/.github/workflows/publish_charts.yml b/.github/workflows/publish_charts.yml index 06c0dffb3..7c8fdf96d 100644 --- a/.github/workflows/publish_charts.yml +++ b/.github/workflows/publish_charts.yml @@ -16,7 +16,7 @@ jobs: continue-on-error: true steps: - name: Checkout - uses: actions/checkout@v5 + 
uses: actions/checkout@v6 with: fetch-depth: 0 diff --git a/.github/workflows/publish_container.yml b/.github/workflows/publish_container.yml index 00c86bb71..69636644a 100644 --- a/.github/workflows/publish_container.yml +++ b/.github/workflows/publish_container.yml @@ -24,7 +24,7 @@ jobs: new_tag: ${{ steps.bump_tag.outputs.new_tag }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: fetch-depth: '0' - name: Bump version and push tag @@ -46,7 +46,7 @@ jobs: packages: write steps: - name: Check out the repo - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Log in to the Github Container registry uses: docker/login-action@v3 @@ -101,7 +101,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: fetch-depth: '0' diff --git a/.github/workflows/release_sda-admin.yaml b/.github/workflows/release_sda-admin.yaml index 43c7f1d38..5df7953ab 100644 --- a/.github/workflows/release_sda-admin.yaml +++ b/.github/workflows/release_sda-admin.yaml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code into the Go module directory - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Get version tag run: | VERSION=$(cat sda-admin/.version) @@ -37,7 +37,7 @@ jobs: - "darwin/arm64" steps: - name: Check out code into the Go module directory - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Set up Go uses: actions/setup-go@v6 diff --git a/.github/workflows/scan-images.yml b/.github/workflows/scan-images.yml index c1ea49fdc..f764e13f2 100644 --- a/.github/workflows/scan-images.yml +++ b/.github/workflows/scan-images.yml @@ -39,7 +39,7 @@ jobs: output: '${{ matrix.image-name }}-results.sarif' severity: "CRITICAL,HIGH" - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@v4 with: sarif_file: '${{ matrix.image-name }}-results.sarif' 
category: ${{ matrix.image-name }} @@ -65,7 +65,7 @@ jobs: output: 'sda-results.sarif' severity: "CRITICAL,HIGH" - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v3 + uses: github/codeql-action/upload-sarif@v4 with: sarif_file: 'sda-results.sarif' category: sda \ No newline at end of file diff --git a/.github/workflows/shellcheck.yml b/.github/workflows/shellcheck.yml index caf7e1e37..fba3ef920 100644 --- a/.github/workflows/shellcheck.yml +++ b/.github/workflows/shellcheck.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Check out code - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: check all scripts uses: ludeeus/action-shellcheck@master diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 1f666ba6e..2a676fbde 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - name: Set up JDK ${{ matrix.java-version }} uses: actions/setup-java@v5 with: @@ -44,7 +44,7 @@ jobs: id: go - name: Check out code into the Go module directory - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Get dependencies run: | @@ -79,7 +79,7 @@ jobs: id: go - name: Check out code into the Go module directory - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Get dependencies run: | @@ -114,7 +114,7 @@ jobs: id: go - name: Check out code into the Go module directory - uses: actions/checkout@v5 + uses: actions/checkout@v6 - name: Get dependencies run: | diff --git a/Makefile b/Makefile index ca0f91444..c2c1f7f32 100644 --- a/Makefile +++ b/Makefile @@ -90,6 +90,11 @@ sda-sync-down: integrationtest-postgres: build-postgresql @PR_NUMBER=$$(date +%F) docker compose -f .github/integration/postgres.yml run tests @PR_NUMBER=$$(date +%F) docker compose -f .github/integration/postgres.yml down -v --remove-orphans +integrationtest-postgres-run: 
build-postgresql + @PR_NUMBER=$$(date +%F) docker compose -f .github/integration/postgres.yml run tests +integrationtest-postgres-down: + @PR_NUMBER=$$(date +%F) docker compose -f .github/integration/postgres.yml down -v --remove-orphans + integrationtest-rabbitmq: build-rabbitmq build-sda @PR_NUMBER=$$(date +%F) docker compose -f .github/integration/rabbitmq-federation.yml run federation_test @PR_NUMBER=$$(date +%F) docker compose -f .github/integration/rabbitmq-federation.yml down -v --remove-orphans diff --git a/charts/sda-svc/templates/api-deploy.yaml b/charts/sda-svc/templates/api-deploy.yaml index e1867e8df..76b503329 100644 --- a/charts/sda-svc/templates/api-deploy.yaml +++ b/charts/sda-svc/templates/api-deploy.yaml @@ -100,6 +100,7 @@ spec: - name: inbox mountPath: "/inbox" {{- end }} + enableServiceLinks: false volumes: {{- if not .Values.global.vaultSecrets }} - name: config diff --git a/charts/sda-svc/templates/auth-deploy.yaml b/charts/sda-svc/templates/auth-deploy.yaml index e72c6c0eb..53099ba02 100644 --- a/charts/sda-svc/templates/auth-deploy.yaml +++ b/charts/sda-svc/templates/auth-deploy.yaml @@ -99,6 +99,7 @@ spec: - name: c4gh mountPath: {{ template "c4ghPath" . 
}} {{- end }} + enableServiceLinks: false volumes: {{- if not .Values.global.vaultSecrets }} {{- if and (.Values.global.auth.resignJwt) (not .Values.global.vaultSecrets) }} diff --git a/charts/sda-svc/templates/doa-deploy.yaml b/charts/sda-svc/templates/doa-deploy.yaml index 705495196..e26353ae7 100644 --- a/charts/sda-svc/templates/doa-deploy.yaml +++ b/charts/sda-svc/templates/doa-deploy.yaml @@ -281,6 +281,7 @@ spec: - name: archive mountPath: {{ .Values.global.archive.volumePath | quote }} {{- end }} + enableServiceLinks: false volumes: - name: tmp emptyDir: {} diff --git a/charts/sda-svc/templates/download-deploy.yaml b/charts/sda-svc/templates/download-deploy.yaml index 90650a6f0..6d3f4ffb1 100644 --- a/charts/sda-svc/templates/download-deploy.yaml +++ b/charts/sda-svc/templates/download-deploy.yaml @@ -100,6 +100,7 @@ spec: - name: archive mountPath: {{ .Values.global.archive.volumePath | quote }} {{- end }} + enableServiceLinks: false volumes: {{- if and (not .Values.global.pkiService) .Values.global.tls.enabled }} - name: tls diff --git a/charts/sda-svc/templates/finalize-deploy.yaml b/charts/sda-svc/templates/finalize-deploy.yaml index 068f8752a..35705dc2d 100644 --- a/charts/sda-svc/templates/finalize-deploy.yaml +++ b/charts/sda-svc/templates/finalize-deploy.yaml @@ -92,6 +92,7 @@ spec: - name: backup mountPath: {{ .Values.global.backupArchive.volumePath | quote }} {{- end }} + enableServiceLinks: false volumes: {{- if not .Values.global.vaultSecrets }} - name: config diff --git a/charts/sda-svc/templates/ingest-deploy.yaml b/charts/sda-svc/templates/ingest-deploy.yaml index 90de9d467..53ef29d35 100644 --- a/charts/sda-svc/templates/ingest-deploy.yaml +++ b/charts/sda-svc/templates/ingest-deploy.yaml @@ -95,6 +95,7 @@ spec: - name: inbox mountPath: {{ .Values.global.inbox.path | quote }} {{- end }} + enableServiceLinks: false volumes: {{- if not .Values.global.vaultSecrets }} - name: config diff --git a/charts/sda-svc/templates/intercept-deploy.yaml 
b/charts/sda-svc/templates/intercept-deploy.yaml index 062ae95fb..a9dd3c6ee 100644 --- a/charts/sda-svc/templates/intercept-deploy.yaml +++ b/charts/sda-svc/templates/intercept-deploy.yaml @@ -70,6 +70,7 @@ spec: - name: tls mountPath: {{ template "tlsPath" . }} {{- end }} + enableServiceLinks: false volumes: - name: config projected: diff --git a/charts/sda-svc/templates/mapper-deploy.yaml b/charts/sda-svc/templates/mapper-deploy.yaml index 7c89f0934..3cc270d13 100644 --- a/charts/sda-svc/templates/mapper-deploy.yaml +++ b/charts/sda-svc/templates/mapper-deploy.yaml @@ -71,6 +71,7 @@ spec: - name: inbox mountPath: {{ .Values.global.inbox.path | quote }} {{- end }} + enableServiceLinks: false volumes: {{- if not .Values.global.vaultSecrets }} - name: config diff --git a/charts/sda-svc/templates/re-encrypt-deploy.yaml b/charts/sda-svc/templates/re-encrypt-deploy.yaml index ee7eb0213..8801bd29a 100644 --- a/charts/sda-svc/templates/re-encrypt-deploy.yaml +++ b/charts/sda-svc/templates/re-encrypt-deploy.yaml @@ -73,6 +73,7 @@ spec: - name: tls mountPath: {{ template "tlsPath" . }} {{- end }} + enableServiceLinks: false volumes: {{- if not .Values.global.vaultSecrets }} - name: config diff --git a/charts/sda-svc/templates/release-test-deploy.yml b/charts/sda-svc/templates/release-test-deploy.yml index cc86a47d1..2e8b3d72e 100644 --- a/charts/sda-svc/templates/release-test-deploy.yml +++ b/charts/sda-svc/templates/release-test-deploy.yml @@ -89,6 +89,7 @@ spec: command: [ "/bin/bash" ] args: - "/release-test-app/release-test.sh" + enableServiceLinks: false volumes: {{- if and (not .Values.global.pkiService) .Values.global.tls.enabled }} - name: certs diff --git a/charts/sda-svc/templates/s3-inbox-deploy.yaml b/charts/sda-svc/templates/s3-inbox-deploy.yaml index 5a3470432..a69a66fc4 100644 --- a/charts/sda-svc/templates/s3-inbox-deploy.yaml +++ b/charts/sda-svc/templates/s3-inbox-deploy.yaml @@ -115,6 +115,7 @@ spec: mountPath: {{ include "jwtPath" . 
}} {{- end }} {{- end }} + enableServiceLinks: false volumes: {{- if and (not .Values.global.pkiService) .Values.global.tls.enabled }} - name: tls diff --git a/charts/sda-svc/templates/sftp-inbox-deploy.yaml b/charts/sda-svc/templates/sftp-inbox-deploy.yaml index 8dcd2528e..67dd1ad7d 100644 --- a/charts/sda-svc/templates/sftp-inbox-deploy.yaml +++ b/charts/sda-svc/templates/sftp-inbox-deploy.yaml @@ -164,6 +164,7 @@ spec: mountPath: "/etc/ssl/certs/java" - name: tmp mountPath: /tmp/ + enableServiceLinks: false volumes: - name: tmp emptyDir: {} diff --git a/charts/sda-svc/templates/sync-deploy.yaml b/charts/sda-svc/templates/sync-deploy.yaml index 536f41e6e..c46c1d610 100644 --- a/charts/sda-svc/templates/sync-deploy.yaml +++ b/charts/sda-svc/templates/sync-deploy.yaml @@ -92,6 +92,7 @@ spec: - name: tls mountPath: {{ template "tlsPath" . }} {{- end }} + enableServiceLinks: false volumes: {{- if and (not .Values.global.pkiService) .Values.global.tls.enabled }} - name: tls diff --git a/charts/sda-svc/templates/syncapi-deploy.yaml b/charts/sda-svc/templates/syncapi-deploy.yaml index 6e0f474a9..98d1bfb7b 100644 --- a/charts/sda-svc/templates/syncapi-deploy.yaml +++ b/charts/sda-svc/templates/syncapi-deploy.yaml @@ -92,6 +92,7 @@ spec: - name: tls mountPath: {{ template "tlsPath" . 
}} {{- end }} + enableServiceLinks: false volumes: {{- if not .Values.global.vaultSecrets }} - name: config diff --git a/charts/sda-svc/templates/verify-deploy.yaml b/charts/sda-svc/templates/verify-deploy.yaml index 23f30efe1..0d10eaf48 100644 --- a/charts/sda-svc/templates/verify-deploy.yaml +++ b/charts/sda-svc/templates/verify-deploy.yaml @@ -74,6 +74,7 @@ spec: - name: archive mountPath: {{ .Values.global.archive.volumePath | quote }} {{- end }} + enableServiceLinks: false volumes: {{- if and (not .Values.global.pkiService) .Values.global.tls.enabled }} - name: tls diff --git a/postgresql/initdb.d/01_main.sql b/postgresql/initdb.d/01_main.sql index fc931b49e..29ec12198 100644 --- a/postgresql/initdb.d/01_main.sql +++ b/postgresql/initdb.d/01_main.sql @@ -29,8 +29,13 @@ VALUES (0, now(), 'Created with version'), (12, now(), 'Add key hash'), (13, now(), 'Create API user'), (14, now(), 'Create Auth user'), - (15, now(), 'Give API user insert priviledge in logs table'), - (16, now(), 'Give ingest user select priviledge in encryption_keys table'); + (15, now(), 'Give API user insert privilege in logs table'), + (16, now(), 'Give ingest user select privilege in encryption_keys table'), + (17, now(), 'Add submission user to constraint'), + (18, now(), 'Create rotatekey role and grant it privileges to sda tables'), + (19, now(), 'Create new indexes on files and file_event_log tables'), + (20, now(), 'Deprecate file_event_log.correlation_id column and migrate data where file_id != correlation_id'), + (21, now(), 'Drop functions set_verified, and set_archived'); -- Datasets are used to group files, and permissions are set on the dataset -- level @@ -58,6 +63,7 @@ CREATE TABLE files ( submission_user TEXT, submission_file_path TEXT DEFAULT '' NOT NULL, + submission_file_size BIGINT, archive_file_path TEXT DEFAULT '' NOT NULL, archive_file_size BIGINT, @@ -76,6 +82,8 @@ CREATE TABLE files ( CONSTRAINT unique_ingested UNIQUE(submission_file_path, archive_file_path, 
submission_user) ); +-- Add indexes to the files table +CREATE INDEX files_submission_user_submission_file_path_idx ON files(submission_user, submission_file_path); -- The user info is used by auth to be able to link users to their name and email CREATE TABLE userinfo ( @@ -151,7 +159,6 @@ CREATE TABLE file_event_log ( id SERIAL PRIMARY KEY, file_id UUID REFERENCES files(id), event TEXT REFERENCES file_events(title), - correlation_id UUID, -- Correlation ID in the message's header user_id TEXT, -- Elixir user id (or pipeline-step for ingestion, -- etc.) details JSONB, -- This is my solution to fields such as @@ -165,6 +172,8 @@ CREATE TABLE file_event_log ( success BOOLEAN, error TEXT ); +-- Add indexes to the file_event_log table +CREATE INDEX file_event_log_file_id_started_at_idx ON file_event_log(file_id, started_at); -- This table is used to define events for dataset event logging. CREATE TABLE dataset_events ( diff --git a/postgresql/initdb.d/02_functions.sql b/postgresql/initdb.d/02_functions.sql index 96355ba1a..5b33670b1 100644 --- a/postgresql/initdb.d/02_functions.sql +++ b/postgresql/initdb.d/02_functions.sql @@ -18,57 +18,27 @@ CREATE TRIGGER files_last_modified EXECUTE PROCEDURE files_updated(); -- Function for registering files on upload -CREATE FUNCTION register_file(submission_file_path TEXT, submission_user TEXT) -RETURNS TEXT AS $register_file$ +CREATE FUNCTION sda.register_file(file_id TEXT, submission_file_path TEXT, submission_user TEXT) + RETURNS TEXT AS $register_file$ DECLARE - file_ext TEXT; file_uuid UUID; BEGIN -- Upsert file information. we're not interested in restarted uploads so old -- overwritten files that haven't been ingested are updated instead of -- inserting a new row. 
- INSERT INTO sda.files( submission_file_path, submission_user, encryption_method ) - VALUES( submission_file_path, submission_user, 'CRYPT4GH' ) - ON CONFLICT ON CONSTRAINT unique_ingested - DO UPDATE SET submission_file_path = EXCLUDED.submission_file_path, - submission_user = EXCLUDED.submission_user, - encryption_method = EXCLUDED.encryption_method - RETURNING id INTO file_uuid; - - -- We add a new event for every registration though, as this might help for - -- debugging. - INSERT INTO sda.file_event_log( file_id, event, user_id ) - VALUES (file_uuid, 'registered', submission_user); - - RETURN file_uuid; +INSERT INTO sda.files( id, submission_file_path, submission_user, encryption_method ) +VALUES( COALESCE(CAST(NULLIF(file_id, '') AS UUID), gen_random_uuid()), submission_file_path, submission_user, 'CRYPT4GH' ) + ON CONFLICT ON CONSTRAINT unique_ingested + DO UPDATE SET submission_file_path = EXCLUDED.submission_file_path, + submission_user = EXCLUDED.submission_user, + encryption_method = EXCLUDED.encryption_method + RETURNING id INTO file_uuid; + +-- We add a new event for every registration though, as this might help for +-- debugging. 
+INSERT INTO sda.file_event_log( file_id, event, user_id ) +VALUES (file_uuid, 'registered', submission_user); + +RETURN file_uuid; END; $register_file$ LANGUAGE plpgsql; - -CREATE FUNCTION set_archived(file_uuid UUID, corr_id UUID, file_path TEXT, file_size BIGINT, inbox_checksum_value TEXT, inbox_checksum_type TEXT) -RETURNS void AS $set_archived$ -BEGIN - UPDATE sda.files SET archive_file_path = file_path, archive_file_size = file_size WHERE id = file_uuid; - - INSERT INTO sda.checksums(file_id, checksum, type, source) - VALUES(file_uuid, inbox_checksum_value, upper(inbox_checksum_type)::sda.checksum_algorithm, upper('UPLOADED')::sda.checksum_source); - - INSERT INTO sda.file_event_log(file_id, event, correlation_id) VALUES(file_uuid, 'archived', corr_id); -END; - -$set_archived$ LANGUAGE plpgsql; - -CREATE FUNCTION set_verified(file_uuid UUID, corr_id UUID, archive_checksum TEXT, archive_checksum_type TEXT, decrypted_size BIGINT, decrypted_checksum TEXT, decrypted_checksum_type TEXT) -RETURNS void AS $set_verified$ -BEGIN - UPDATE sda.files SET decrypted_file_size = decrypted_size WHERE id = file_uuid; - - INSERT INTO sda.checksums(file_id, checksum, type, source) - VALUES(file_uuid, archive_checksum, upper(archive_checksum_type)::sda.checksum_algorithm, upper('ARCHIVED')::sda.checksum_source); - - INSERT INTO sda.checksums(file_id, checksum, type, source) - VALUES(file_uuid, decrypted_checksum, upper(decrypted_checksum_type)::sda.checksum_algorithm, upper('UNENCRYPTED')::sda.checksum_source); - - INSERT INTO sda.file_event_log(file_id, event, correlation_id) VALUES(file_uuid, 'verified', corr_id); -END; - -$set_verified$ LANGUAGE plpgsql; \ No newline at end of file diff --git a/postgresql/initdb.d/04_grants.sql b/postgresql/initdb.d/04_grants.sql index a04fe93f3..6fed60e31 100644 --- a/postgresql/initdb.d/04_grants.sql +++ b/postgresql/initdb.d/04_grants.sql @@ -126,6 +126,19 @@ GRANT UPDATE ON local_ega.files TO mapper; 
-------------------------------------------------------------------------------- +CREATE ROLE rotatekey; + +GRANT USAGE ON SCHEMA sda TO rotatekey; +GRANT INSERT ON sda.files TO rotatekey; +GRANT SELECT ON sda.files TO rotatekey; +GRANT UPDATE ON sda.files TO rotatekey; +GRANT SELECT ON sda.checksums TO rotatekey; +GRANT USAGE, SELECT ON SEQUENCE sda.checksums_id_seq TO rotatekey; +GRANT SELECT ON sda.file_event_log TO rotatekey; +GRANT SELECT ON sda.encryption_keys TO rotatekey; + +-------------------------------------------------------------------------------- + CREATE ROLE sync; -- uses: db.GetArchived GRANT USAGE ON SCHEMA sda TO sync; @@ -140,7 +153,6 @@ GRANT UPDATE ON local_ega.main TO sync; -------------------------------------------------------------------------------- - CREATE ROLE download; GRANT USAGE ON SCHEMA sda TO download; diff --git a/postgresql/migratedb.d/14.sql b/postgresql/migratedb.d/14.sql index f39a19af1..d2d2204e9 100644 --- a/postgresql/migratedb.d/14.sql +++ b/postgresql/migratedb.d/14.sql @@ -41,6 +41,9 @@ BEGIN GRANT SELECT, INSERT, UPDATE ON sda.userinfo TO auth; GRANT base TO auth; + + -- Drop temporary user creation function + DROP FUNCTION create_role_if_not_exists; ELSE RAISE NOTICE 'Schema migration from % to % does not apply now, skipping', sourcever, sourcever+1; END IF; diff --git a/postgresql/migratedb.d/15.sql b/postgresql/migratedb.d/15.sql index 2849382c4..d58135fbc 100644 --- a/postgresql/migratedb.d/15.sql +++ b/postgresql/migratedb.d/15.sql @@ -5,7 +5,7 @@ DECLARE -- The version we know how to do migration from, at the end of a successful migration -- we will no longer be at this version. 
sourcever INTEGER := 14; - changes VARCHAR := 'Give API user insert priviledge in logs table'; + changes VARCHAR := 'Give API user insert privilege in logs table'; BEGIN IF (select max(version) from sda.dbschema_version) = sourcever then RAISE NOTICE 'Doing migration from schema version % to %', sourcever, sourcever+1; diff --git a/postgresql/migratedb.d/16.sql b/postgresql/migratedb.d/16.sql index 138e4fb9c..1783a051a 100644 --- a/postgresql/migratedb.d/16.sql +++ b/postgresql/migratedb.d/16.sql @@ -5,7 +5,7 @@ DECLARE -- The version we know how to do migration from, at the end of a successful migration -- we will no longer be at this version. sourcever INTEGER := 15; - changes VARCHAR := 'Give ingest user select priviledge in encryption_keys table'; + changes VARCHAR := 'Give ingest user select privilege in encryption_keys table'; BEGIN IF (select max(version) from sda.dbschema_version) = sourcever then RAISE NOTICE 'Doing migration from schema version % to %', sourcever, sourcever+1; diff --git a/postgresql/migratedb.d/18.sql b/postgresql/migratedb.d/18.sql new file mode 100644 index 000000000..036128875 --- /dev/null +++ b/postgresql/migratedb.d/18.sql @@ -0,0 +1,51 @@ + +DO +$$ +DECLARE +-- The version we know how to do migration from, at the end of a successful migration +-- we will no longer be at this version. + sourcever INTEGER := 17; + changes VARCHAR := 'Create rotatekey role and grant it privileges to sda tables'; +BEGIN + IF (select max(version) from sda.dbschema_version) = sourcever then + RAISE NOTICE 'Doing migration from schema version % to %', sourcever, sourcever+1; + RAISE NOTICE 'Changes: %', changes; + INSERT INTO sda.dbschema_version VALUES(sourcever+1, now(), changes); + + -- Temporary function for creating roles if they do not already exist. 
+ CREATE FUNCTION create_role_if_not_exists(role_name NAME) RETURNS void AS $created$ + BEGIN + IF EXISTS ( + SELECT FROM pg_catalog.pg_roles + WHERE rolname = role_name) THEN + RAISE NOTICE 'Role "%" already exists. Skipping.', role_name; + ELSE + BEGIN + EXECUTE format('CREATE ROLE %I', role_name); + EXCEPTION + WHEN duplicate_object THEN + RAISE NOTICE 'Role "%" was just created by a concurrent transaction. Skipping.', role_name; + END; + END IF; + END; + $created$ LANGUAGE plpgsql; + + PERFORM create_role_if_not_exists('rotatekey'); + + GRANT USAGE ON SCHEMA sda TO rotatekey; + GRANT INSERT ON sda.files TO rotatekey; + GRANT SELECT ON sda.files TO rotatekey; + GRANT UPDATE ON sda.files TO rotatekey; + GRANT SELECT ON sda.checksums TO rotatekey; + GRANT USAGE, SELECT ON SEQUENCE sda.checksums_id_seq TO rotatekey; + GRANT SELECT ON sda.file_event_log TO rotatekey; + GRANT SELECT ON sda.encryption_keys TO rotatekey; + + -- Drop temporary user creation function + DROP FUNCTION create_role_if_not_exists; + + ELSE + RAISE NOTICE 'Schema migration from % to % does not apply now, skipping', sourcever, sourcever+1; + END IF; +END +$$ diff --git a/postgresql/migratedb.d/19_add_index_on_files_and_file_event_log.sql b/postgresql/migratedb.d/19_add_index_on_files_and_file_event_log.sql new file mode 100644 index 000000000..c559b3980 --- /dev/null +++ b/postgresql/migratedb.d/19_add_index_on_files_and_file_event_log.sql @@ -0,0 +1,24 @@ + +DO +$$ +DECLARE +-- The version we know how to do migration from, at the end of a successful migration +-- we will no longer be at this version. 
+ sourcever INTEGER := 18; + changes VARCHAR := 'Create new indexes on files and file_event_log tables'; +BEGIN + IF (select max(version) from sda.dbschema_version) = sourcever then + RAISE NOTICE 'Doing migration from schema version % to %', sourcever, sourcever+1; + RAISE NOTICE 'Changes: %', changes; + INSERT INTO sda.dbschema_version VALUES(sourcever+1, now(), changes); + + CREATE INDEX file_event_log_file_id_started_at_idx ON sda.file_event_log(file_id, started_at); + + CREATE INDEX files_submission_user_submission_file_path_idx + ON sda.files(submission_user, submission_file_path); + + ELSE + RAISE NOTICE 'Schema migration from % to % does not apply now, skipping', sourcever, sourcever+1; + END IF; +END +$$ diff --git a/postgresql/migratedb.d/20_deprecate_files_event_log_correlation_id.sql b/postgresql/migratedb.d/20_deprecate_files_event_log_correlation_id.sql new file mode 100644 index 000000000..4b7e5e4ae --- /dev/null +++ b/postgresql/migratedb.d/20_deprecate_files_event_log_correlation_id.sql @@ -0,0 +1,83 @@ + +DO +$$ +DECLARE +-- The version we know how to do migration from, at the end of a successful migration +-- we will no longer be at this version. 
+ sourcever INTEGER := 19; + changes VARCHAR := 'Deprecate file_event_log.correlation_id column and migrate data where file_id != correlation_id'; +BEGIN + IF (select max(version) from sda.dbschema_version) = sourcever then + RAISE NOTICE 'Doing migration from schema version % to %', sourcever, sourcever+1; + RAISE NOTICE 'Changes: %', changes; + INSERT INTO sda.dbschema_version VALUES(sourcever+1, now(), changes); + + -- Migrate data where files.id != file_event_log.correlation_id + + -- First drop foreign key constraint so we can update values without constraint restriction + ALTER TABLE sda.file_event_log + DROP CONSTRAINT file_event_log_file_id_fkey; + + -- Update all files which have a file_event_log where file_id != correlation_id + UPDATE sda.files AS f + SET id = fel.correlation_id + FROM sda.file_event_log AS fel + WHERE f.id = fel.file_id + AND fel.file_id != fel.correlation_id + AND fel.correlation_id IS NOT NULL; + + -- Update all file_event_log where file_id != correlation_id + UPDATE sda.file_event_log AS f + SET file_id = fel.correlation_id + FROM sda.file_event_log AS fel + WHERE f.file_id = fel.file_id + AND fel.file_id != fel.correlation_id + AND fel.correlation_id IS NOT NULL; + + -- Add back the foreign key constraint + ALTER TABLE sda.file_event_log + ADD CONSTRAINT file_event_log_file_id_fkey FOREIGN KEY (file_id) + REFERENCES sda.files(id); + + + -- Update RegisterFile func + -- First drop it so we can create the updated version + DROP FUNCTION IF EXISTS sda.register_file; + + + -- Create updated function + -- Function for registering files on upload + CREATE FUNCTION sda.register_file(file_id TEXT, submission_file_path TEXT, submission_user TEXT) + RETURNS TEXT AS $register_file$ + DECLARE + file_uuid UUID; + BEGIN + -- Upsert file information. we're not interested in restarted uploads so old + -- overwritten files that haven't been ingested are updated instead of + -- inserting a new row. 
+ INSERT INTO sda.files( id, submission_file_path, submission_user, encryption_method ) + VALUES( COALESCE(CAST(NULLIF(file_id, '') AS UUID), gen_random_uuid()), submission_file_path, submission_user, 'CRYPT4GH' ) + ON CONFLICT ON CONSTRAINT unique_ingested + DO UPDATE SET submission_file_path = EXCLUDED.submission_file_path, + submission_user = EXCLUDED.submission_user, + encryption_method = EXCLUDED.encryption_method + RETURNING id INTO file_uuid; + + -- We add a new event for every registration though, as this might help for + -- debugging. + INSERT INTO sda.file_event_log( file_id, event, user_id ) + VALUES (file_uuid, 'registered', submission_user); + + RETURN file_uuid; + END; + $register_file$ LANGUAGE plpgsql; + + -- Drop the correlation_id column from sda.file_event_log + ALTER TABLE sda.file_event_log + DROP COLUMN correlation_id; + +ELSE + RAISE NOTICE 'Schema migration from % to % does not apply now, skipping', sourcever, sourcever+1; + END IF; +END +$$ diff --git a/postgresql/migratedb.d/21_drop_unused_functions_set_verified_and_set_archived.sql b/postgresql/migratedb.d/21_drop_unused_functions_set_verified_and_set_archived.sql new file mode 100644 index 000000000..bde25978c --- /dev/null +++ b/postgresql/migratedb.d/21_drop_unused_functions_set_verified_and_set_archived.sql @@ -0,0 +1,26 @@ + +DO +$$ +DECLARE +-- The version we know how to do migration from, at the end of a successful migration +-- we will no longer be at this version. 
+ sourcever INTEGER := 20; + changes VARCHAR := 'Drop functions set_verified, and set_archived'; +BEGIN + IF (select max(version) from sda.dbschema_version) = sourcever then + RAISE NOTICE 'Doing migration from schema version % to %', sourcever, sourcever+1; + RAISE NOTICE 'Changes: %', changes; + INSERT INTO sda.dbschema_version VALUES(sourcever+1, now(), changes); + + + -- Drop set_verified func, as not in use + DROP FUNCTION IF EXISTS sda.set_verified; + + -- Drop set_archived func, as not in use + DROP FUNCTION IF EXISTS sda.set_archived; + +ELSE + RAISE NOTICE 'Schema migration from % to % does not apply now, skipping', sourcever, sourcever+1; + END IF; +END +$$ diff --git a/rabbitmq/definitions.json b/rabbitmq/definitions.json index 7d324acfe..f4955963e 100644 --- a/rabbitmq/definitions.json +++ b/rabbitmq/definitions.json @@ -149,6 +149,13 @@ "auto_delete": false, "arguments": {} }, + { + "name": "rotatekey", + "vhost": "sda", + "durable": true, + "auto_delete": false, + "arguments": {} + }, { "name": "catch_all.dead", "vhost": "sda", @@ -292,6 +299,14 @@ "destination": "verified", "routing_key": "verified" }, + { + "source": "sda", + "vhost": "sda", + "destination_type": "queue", + "arguments": {}, + "destination": "rotatekey", + "routing_key": "rotatekey" + }, { "source": "sda.dead", "vhost": "sda", diff --git a/sda-admin/README.md b/sda-admin/README.md index fe9851036..fd8bbf083 100644 --- a/sda-admin/README.md +++ b/sda-admin/README.md @@ -28,22 +28,35 @@ Use the following command to return all files belonging to the specified user `t sda-admin file list -user test-user@example.org ``` + ## Ingest a file -Use the following command to trigger the ingesting of a given file `/path/to/file.c4gh` that belongs to the user `test-user@example.org` +You can ingest a file either by specifying its path and user, or by using its file ID: +**By file path and user:** ```sh sda-admin file ingest -filepath /path/to/file.c4gh -user test-user@example.org ``` +**By file 
ID:** +```sh +sda-admin file ingest -fileid FILE_ID +``` + ## Assign an accession ID to a file -Use the following command to assign an accession ID `my-accession-id-1` to a given file `/path/to/file.c4gh` that belongs to the user `test-user@example.org` +You can assign an accession ID to a file either by specifying its path and user, or by using its file ID: +**By file path and user:** ```sh sda-admin file set-accession -filepath /path/to/file.c4gh -user test-user@example.org -accession-id my-accession-id-1 ``` +**By file ID:** +```sh +sda-admin file set-accession -fileid FILE_ID -accession-id my-accession-id-1 +``` + ## Create a dataset from a list of accession IDs and a dataset ID Use the following command to create a dataset `dataset001` from accession IDs `my-accession-id-1` and `my-accession-id-2` for files that belongs to the user `test-user@example.org` diff --git a/sda-admin/file/file.go b/sda-admin/file/file.go index dbb9dc5de..e84cd5d85 100644 --- a/sda-admin/file/file.go +++ b/sda-admin/file/file.go @@ -39,25 +39,38 @@ func List(apiURI, token, username string) error { return nil } -// Ingest triggers the ingestion of a given file -func Ingest(apiURI, token, username, filepath string) error { - parsedURL, err := url.Parse(apiURI) +// Ingest triggers the ingestion of a file via the SDA API. +// Depending on the provided fields in ingestInfo: +// - If ingestInfo.ID is empty, it sends a POST request to /file/ingest with a JSON body containing the file path and user. +// - If ingestInfo.ID is set, it sends a POST request to /file/ingest with the fileid as a query parameter and no JSON body.
+func Ingest(ingestInfo helpers.FileInfo) error { + var jsonBody []byte + parsedURL, err := url.Parse(ingestInfo.URL) + if err != nil { + return err + } + parsedURL.Path = path.Join(parsedURL.Path, "file/ingest") - requestBody := RequestBodyFileIngest{ - Filepath: filepath, - User: username, - } - - jsonBody, err := json.Marshal(requestBody) - if err != nil { - return fmt.Errorf("failed to marshal JSON, reason: %v", err) + if ingestInfo.ID == "" { + if err := helpers.CheckValidChars(ingestInfo.Path); err != nil { + return err + } + requestBody := RequestBodyFileIngest{ + Filepath: ingestInfo.Path, + User: ingestInfo.User, + } + jsonBody, err = json.Marshal(requestBody) + if err != nil { + return fmt.Errorf("failed to marshal JSON, reason: %v", err) + } + } else { + query := parsedURL.Query() + query.Set("fileid", ingestInfo.ID) + parsedURL.RawQuery = query.Encode() + jsonBody = nil } - _, err = helpers.PostRequest(parsedURL.String(), token, jsonBody) + _, err = helpers.PostRequest(parsedURL.String(), ingestInfo.Token, jsonBody) if err != nil { return err } @@ -65,26 +78,40 @@ return nil } -// SetAccession assigns an accession ID to a specified file for a given user -func SetAccession(apiURI, token, username, filepath, accessionID string) error { - parsedURL, err := url.Parse(apiURI) +// SetAccession assigns an accession ID to a file via the SDA API. +// Depending on the provided fields in accessionInfo: +// - If accessionInfo.ID is empty, it sends a POST request to /file/accession with a JSON body containing accession_id, filepath, and user. +// - If accessionInfo.ID is set, it sends a POST request to /file/accession with fileid and accessionid as query parameters.
+func SetAccession(accessionInfo helpers.FileInfo) error { + var jsonBody []byte + parsedURL, err := url.Parse(accessionInfo.URL) if err != nil { return err } parsedURL.Path = path.Join(parsedURL.Path, "file/accession") - requestBody := RequestBodyFileAccession{ - AccessionID: accessionID, - Filepath: filepath, - User: username, - } - - jsonBody, err := json.Marshal(requestBody) - if err != nil { - return fmt.Errorf("failed to marshal JSON, reason: %v", err) + if accessionInfo.ID == "" { + if err := helpers.CheckValidChars(accessionInfo.Path); err != nil { + return err + } + requestBody := RequestBodyFileAccession{ + AccessionID: accessionInfo.Accession, + Filepath: accessionInfo.Path, + User: accessionInfo.User, + } + jsonBody, err = json.Marshal(requestBody) + if err != nil { + return fmt.Errorf("failed to marshal JSON, reason: %v", err) + } + } else { + query := parsedURL.Query() + query.Set("fileid", accessionInfo.ID) + query.Set("accessionid", accessionInfo.Accession) + parsedURL.RawQuery = query.Encode() + jsonBody = nil } - _, err = helpers.PostRequest(parsedURL.String(), token, jsonBody) + _, err = helpers.PostRequest(parsedURL.String(), accessionInfo.Token, jsonBody) if err != nil { return err } diff --git a/sda-admin/file/file_test.go b/sda-admin/file/file_test.go index 42ad532a9..c14bf519f 100644 --- a/sda-admin/file/file_test.go +++ b/sda-admin/file/file_test.go @@ -42,81 +42,169 @@ func TestList(t *testing.T) { mockHelpers.AssertExpectations(t) } -func TestIngest_Success(t *testing.T) { +func TestIngestPath_Success(t *testing.T) { mockHelpers := new(MockHelpers) originalFunc := helpers.PostRequest helpers.PostRequest = mockHelpers.PostRequest defer func() { helpers.PostRequest = originalFunc }() // Restore original after test + var ingestInfo helpers.FileInfo expectedURL := "http://example.com/file/ingest" - token := "test-token" - username := "test-user" - filepath := "/path/to/file" + ingestInfo.URL = "http://example.com" + ingestInfo.Token = 
"test-token" + ingestInfo.User = "test-user" + ingestInfo.Path = "/path/to/file" jsonBody := []byte(`{"filepath":"/path/to/file","user":"test-user"}`) - mockHelpers.On("PostRequest", expectedURL, token, jsonBody).Return([]byte(`{}`), nil) + mockHelpers.On("PostRequest", expectedURL, ingestInfo.Token, jsonBody).Return([]byte(`{}`), nil) - err := Ingest("http://example.com", token, username, filepath) + err := Ingest(ingestInfo) assert.NoError(t, err) mockHelpers.AssertExpectations(t) } -func TestIngest_PostRequestFailure(t *testing.T) { +func TestIngestPath_PostRequestFailure(t *testing.T) { mockHelpers := new(MockHelpers) originalFunc := helpers.PostRequest helpers.PostRequest = mockHelpers.PostRequest defer func() { helpers.PostRequest = originalFunc }() // Restore original after test + var ingestInfo helpers.FileInfo expectedURL := "http://example.com/file/ingest" - token := "test-token" - username := "test-user" - filepath := "/path/to/file" + ingestInfo.URL = "http://example.com" + ingestInfo.Token = "test-token" + ingestInfo.User = "test-user" + ingestInfo.Path = "/path/to/file" jsonBody := []byte(`{"filepath":"/path/to/file","user":"test-user"}`) - mockHelpers.On("PostRequest", expectedURL, token, jsonBody).Return([]byte(nil), errors.New("failed to send request")) + mockHelpers.On("PostRequest", expectedURL, ingestInfo.Token, jsonBody).Return([]byte(nil), errors.New("failed to send request")) - err := Ingest("http://example.com", token, username, filepath) + err := Ingest(ingestInfo) assert.Error(t, err) assert.EqualError(t, err, "failed to send request") mockHelpers.AssertExpectations(t) } -func TestSetAccession_Success(t *testing.T) { +func TestIngestID_Success(t *testing.T) { mockHelpers := new(MockHelpers) originalFunc := helpers.PostRequest helpers.PostRequest = mockHelpers.PostRequest defer func() { helpers.PostRequest = originalFunc }() // Restore original after test + var ingestInfo helpers.FileInfo + expectedURL := 
"http://example.com/file/ingest?fileid=dd813b8a-ea90-4556-b640-32039733a31f" + ingestInfo.URL = "http://example.com" + ingestInfo.Token = "test-token" + ingestInfo.ID = "dd813b8a-ea90-4556-b640-32039733a31f" + + mockHelpers.On("PostRequest", expectedURL, ingestInfo.Token, []byte(nil)).Return([]byte(`{}`), nil) + + err := Ingest(ingestInfo) + assert.NoError(t, err) + mockHelpers.AssertExpectations(t) +} + +func TestIngestID_PostRequestFailure(t *testing.T) { + mockHelpers := new(MockHelpers) + originalFunc := helpers.PostRequest + helpers.PostRequest = mockHelpers.PostRequest + defer func() { helpers.PostRequest = originalFunc }() // Restore original after test + + var ingestInfo helpers.FileInfo + expectedURL := "http://example.com/file/ingest?fileid=dd813b8a-ea90-4556-b640-32039733a31f" + ingestInfo.URL = "http://example.com" + ingestInfo.Token = "test-token" + ingestInfo.ID = "dd813b8a-ea90-4556-b640-32039733a31f" + + mockHelpers.On("PostRequest", expectedURL, ingestInfo.Token, []byte(nil)).Return([]byte(nil), errors.New("failed to send request")) + + err := Ingest(ingestInfo) + assert.Error(t, err) + assert.EqualError(t, err, "failed to send request") + mockHelpers.AssertExpectations(t) +} + +func TestSetAccessionPath_Success(t *testing.T) { + mockHelpers := new(MockHelpers) + originalFunc := helpers.PostRequest + helpers.PostRequest = mockHelpers.PostRequest + defer func() { helpers.PostRequest = originalFunc }() // Restore original after test + + var accessionInfo helpers.FileInfo expectedURL := "http://example.com/file/accession" - token := "test-token" - username := "test-user" - filepath := "/path/to/file" - accessionID := "accession-123" + accessionInfo.URL = "http://example.com" + accessionInfo.Token = "test-token" + accessionInfo.User = "test-user" + accessionInfo.Path = "/path/to/file" + accessionInfo.Accession = "accession-123" jsonBody := []byte(`{"accession_id":"accession-123","filepath":"/path/to/file","user":"test-user"}`) - 
mockHelpers.On("PostRequest", expectedURL, token, jsonBody).Return([]byte(`{}`), nil) + mockHelpers.On("PostRequest", expectedURL, accessionInfo.Token, jsonBody).Return([]byte(`{}`), nil) - err := SetAccession("http://example.com", token, username, filepath, accessionID) + err := SetAccession(accessionInfo) assert.NoError(t, err) mockHelpers.AssertExpectations(t) } -func TestSetAccession_PostRequestFailure(t *testing.T) { +func TestSetAccessionPath_PostRequestFailure(t *testing.T) { mockHelpers := new(MockHelpers) originalFunc := helpers.PostRequest helpers.PostRequest = mockHelpers.PostRequest defer func() { helpers.PostRequest = originalFunc }() // Restore original after test + var accessionInfo helpers.FileInfo expectedURL := "http://example.com/file/accession" - token := "test-token" - username := "test-user" - filepath := "/path/to/file" - accessionID := "accession-123" + accessionInfo.URL = "http://example.com" + accessionInfo.Token = "test-token" + accessionInfo.User = "test-user" + accessionInfo.Path = "/path/to/file" + accessionInfo.Accession = "accession-123" jsonBody := []byte(`{"accession_id":"accession-123","filepath":"/path/to/file","user":"test-user"}`) - mockHelpers.On("PostRequest", expectedURL, token, jsonBody).Return([]byte(nil), errors.New("failed to send request")) + mockHelpers.On("PostRequest", expectedURL, accessionInfo.Token, jsonBody).Return([]byte(nil), errors.New("failed to send request")) + + err := SetAccession(accessionInfo) + assert.Error(t, err) + assert.EqualError(t, err, "failed to send request") + mockHelpers.AssertExpectations(t) +} + +func TestSetAccessionID_Success(t *testing.T) { + mockHelpers := new(MockHelpers) + originalFunc := helpers.PostRequest + helpers.PostRequest = mockHelpers.PostRequest + defer func() { helpers.PostRequest = originalFunc }() // Restore original after test + + var accessionInfo helpers.FileInfo + expectedURL := 
"http://example.com/file/accession?accessionid=accession-123&fileid=dd813b8a-ea90-4556-b640-32039733a31f" + accessionInfo.URL = "http://example.com" + accessionInfo.Token = "test-token" + accessionInfo.ID = "dd813b8a-ea90-4556-b640-32039733a31f" + accessionInfo.Accession = "accession-123" + + mockHelpers.On("PostRequest", expectedURL, accessionInfo.Token, []byte(nil)).Return([]byte(`{}`), nil) + + err := SetAccession(accessionInfo) + assert.NoError(t, err) + mockHelpers.AssertExpectations(t) +} + +func TestSetAccessionID_PostRequestFailure(t *testing.T) { + mockHelpers := new(MockHelpers) + originalFunc := helpers.PostRequest + helpers.PostRequest = mockHelpers.PostRequest + defer func() { helpers.PostRequest = originalFunc }() // Restore original after test + + var accessionInfo helpers.FileInfo + expectedURL := "http://example.com/file/accession?accessionid=accession-123&fileid=dd813b8a-ea90-4556-b640-32039733a31f" + accessionInfo.URL = "http://example.com" + accessionInfo.Token = "test-token" + accessionInfo.ID = "dd813b8a-ea90-4556-b640-32039733a31f" + accessionInfo.Accession = "accession-123" + + mockHelpers.On("PostRequest", expectedURL, accessionInfo.Token, []byte(nil)).Return([]byte(nil), errors.New("failed to send request")) - err := SetAccession("http://example.com", token, username, filepath, accessionID) + err := SetAccession(accessionInfo) assert.Error(t, err) assert.EqualError(t, err, "failed to send request") mockHelpers.AssertExpectations(t) diff --git a/sda-admin/go.mod b/sda-admin/go.mod index bb272c942..6a388feef 100644 --- a/sda-admin/go.mod +++ b/sda-admin/go.mod @@ -14,7 +14,7 @@ require ( github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - golang.org/x/crypto v0.35.0 // indirect - golang.org/x/sys v0.30.0 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/sys v0.38.0 // indirect gopkg.in/yaml.v3 v3.0.1 // 
indirect ) diff --git a/sda-admin/go.sum b/sda-admin/go.sum index b244f10f2..b9308fa25 100644 --- a/sda-admin/go.sum +++ b/sda-admin/go.sum @@ -14,12 +14,12 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= -golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/sda-admin/helpers/helpers.go b/sda-admin/helpers/helpers.go index 352ee75b6..ed5d108aa 100644 --- a/sda-admin/helpers/helpers.go +++ b/sda-admin/helpers/helpers.go @@ -12,6 +12,15 @@ import ( // necessary for mocking in unit tests var GetResponseBody = 
GetBody +type FileInfo struct { + User string + Path string + ID string + URL string + Token string + Accession string +} + // GetBody sends a GET request to the given URL and returns the body of the response func GetBody(url, token string) ([]byte, error) { req, err := http.NewRequest("GET", url, nil) @@ -50,7 +59,6 @@ var PostRequest = PostReq // PostReq sends a POST request to the server with a JSON body and returns the response body or an error. func PostReq(url, token string, jsonBody []byte) ([]byte, error) { - // Create a new POST request with the provided JSON body req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { return nil, fmt.Errorf("failed to create the request, reason: %v", err) diff --git a/sda-admin/helpers/helpers_test.go b/sda-admin/helpers/helpers_test.go index 6829d1ecd..9570eabcb 100644 --- a/sda-admin/helpers/helpers_test.go +++ b/sda-admin/helpers/helpers_test.go @@ -64,6 +64,11 @@ func TestPostReq(t *testing.T) { body, err = PostReq(serverError.URL, "mock_token", []byte(`{"name":"test"}`)) assert.Error(t, err) assert.Nil(t, body) + + // Test nil jsonBody case + body, err = PostReq(server.URL, "mock_token", nil) + assert.NoError(t, err) + assert.JSONEq(t, mockResponse, string(body)) } func TestInvalidCharacters(t *testing.T) { diff --git a/sda-admin/main.go b/sda-admin/main.go index 62490f6aa..8cd4b56ed 100644 --- a/sda-admin/main.go +++ b/sda-admin/main.go @@ -76,19 +76,25 @@ var fileListUsage = `Usage: sda-admin file list -user USERNAME Options: -user USERNAME Specify the username associated with the files.` -var fileIngestUsage = `Usage: sda-admin file ingest -filepath FILEPATH -user USERNAME - Trigger the ingestion of a given file for a specific user. +var fileIngestUsage = `Usage with file path and user: sda-admin file ingest -filepath FILEPATH -user USERNAME +Usage with file ID: sda-admin file ingest -fileid FILEUUID + + Trigger the ingestion either by providing filepath and user or file ID. 
Options: -filepath FILEPATH Specify the path of the file to ingest. - -user USERNAME Specify the username associated with the file.` + -user USERNAME Specify the username associated with the file. + -fileid FILEUUID Specify the file ID (UUID) of the file to ingest.` -var fileAccessionUsage = `Usage: sda-admin file set-accession -filepath FILEPATH -user USERNAME -accession-id ACCESSION_ID - Assign accession ID to a file and associate it with a user. +var fileAccessionUsage = `Usage with file path and user: sda-admin file set-accession -filepath FILEPATH -user USERNAME -accession-id ACCESSION_ID +Usage with file ID: sda-admin file set-accession -fileid FILEUUID -accession-id ACCESSION_ID + + Assign accession ID to a file by providing filepath and user or file ID. Options: -filepath FILEPATH Specify the path of the file to assign the accession ID. -user USERNAME Specify the username associated with the file. + -fileid FILEUUID Specify the file ID of the file to assign the accession ID. -accession-id ID Specify the accession ID to assign to the file.` var datasetUsage = `Create a dataset: @@ -336,55 +342,67 @@ func handleFileCommand() error { func handleFileIngestCommand() error { fileIngestCmd := flag.NewFlagSet("ingest", flag.ExitOnError) - var filepath, username string - fileIngestCmd.StringVar(&filepath, "filepath", "", "Filepath to ingest") - fileIngestCmd.StringVar(&username, "user", "", "Username to associate with the file") + var ingestInfo helpers.FileInfo + ingestInfo.URL = apiURI + ingestInfo.Token = token + fileIngestCmd.StringVar(&ingestInfo.Path, "filepath", "", "Filepath to ingest") + fileIngestCmd.StringVar(&ingestInfo.User, "user", "", "Username to associate with the file") + fileIngestCmd.StringVar(&ingestInfo.ID, "fileid", "", "File ID (UUID) to ingest") if err := fileIngestCmd.Parse(flag.Args()[2:]); err != nil { return fmt.Errorf("error: failed to parse command line arguments, reason: %v", err) } - if filepath == "" || username == "" { - return 
fmt.Errorf("error: both -filepath and -user are required.\n%s", fileIngestUsage) - } - - if err := helpers.CheckValidChars(filepath); err != nil { - return err - } + switch { + case ingestInfo.Path == "" && ingestInfo.User == "" && ingestInfo.ID == "": + return fmt.Errorf("error: either -filepath and -user pair or -fileid are required.\n%s", fileIngestUsage) + case ingestInfo.ID != "" && (ingestInfo.Path != "" || ingestInfo.User != ""): + return fmt.Errorf("error: choose if -filepath and -user pair or -fileid will be used.\n%s", fileIngestUsage) + case ingestInfo.ID == "" && (ingestInfo.Path == "" || ingestInfo.User == ""): + return fmt.Errorf("error: both -filepath and -user must be provided together.\n%s", fileIngestUsage) + default: + err := file.Ingest(ingestInfo) + if err != nil { + return fmt.Errorf("error: failed to ingest file, reason: %v", err) + } - err := file.Ingest(apiURI, token, username, filepath) - if err != nil { - return fmt.Errorf("error: failed to ingest file, reason: %v", err) + return nil } - - return nil } func handleFileAccessionCommand() error { fileAccessionCmd := flag.NewFlagSet("set-accession", flag.ExitOnError) - var filepath, username, accessionID string - fileAccessionCmd.StringVar(&filepath, "filepath", "", "Filepath to assign accession ID") - fileAccessionCmd.StringVar(&username, "user", "", "Username to associate with the file") - fileAccessionCmd.StringVar(&accessionID, "accession-id", "", "Accession ID to assign") + var accessionInfo helpers.FileInfo + accessionInfo.URL = apiURI + accessionInfo.Token = token + fileAccessionCmd.StringVar(&accessionInfo.Path, "filepath", "", "Filepath to assign accession ID") + fileAccessionCmd.StringVar(&accessionInfo.User, "user", "", "Username to associate with the file") + fileAccessionCmd.StringVar(&accessionInfo.Accession, "accession-id", "", "Accession ID to assign") + fileAccessionCmd.StringVar(&accessionInfo.ID, "fileid", "", "File ID (UUID) to ingest") if err := 
fileAccessionCmd.Parse(flag.Args()[2:]); err != nil { return fmt.Errorf("error: failed to parse command line arguments, reason: %v", err) } - if filepath == "" || username == "" || accessionID == "" { + switch { + case accessionInfo.ID == "" && accessionInfo.Path == "" && accessionInfo.User == "" && accessionInfo.Accession == "": + return fmt.Errorf("error: no arguments provided.\n%s", fileAccessionUsage) + case accessionInfo.ID == "" && (accessionInfo.Path == "" || accessionInfo.User == "" || accessionInfo.Accession == ""): return fmt.Errorf("error: -filepath, -user, and -accession-id are required.\n%s", fileAccessionUsage) - } - - if err := helpers.CheckValidChars(filepath); err != nil { - return err - } + case accessionInfo.ID != "" && accessionInfo.Accession != "" && (accessionInfo.Path != "" || accessionInfo.User != ""): + return fmt.Errorf("error: when using -fileid, do not provide -filepath or -user together. Only -fileid and -accession-id are allowed.\n%s", fileAccessionUsage) + case accessionInfo.ID != "" && accessionInfo.Accession == "" && (accessionInfo.Path == "" && accessionInfo.User == ""): + return fmt.Errorf("error: -accession-id is required.\n%s", fileAccessionUsage) + case accessionInfo.ID == "" && accessionInfo.Path != "" && accessionInfo.User != "" && accessionInfo.Accession == "": + return fmt.Errorf("error: -accession-id is required.\n%s", fileAccessionUsage) + default: + err := file.SetAccession(accessionInfo) + if err != nil { + return fmt.Errorf("error: failed to assign accession ID to file, reason: %v", err) + } - err := file.SetAccession(apiURI, token, username, filepath, accessionID) - if err != nil { - return fmt.Errorf("error: failed to assign accession ID to file, reason: %v", err) + return nil } - - return nil } func handleDatasetCommand() error { diff --git a/sda-doa/pom.xml b/sda-doa/pom.xml index a7d98e50c..131aab4e9 100644 --- a/sda-doa/pom.xml +++ b/sda-doa/pom.xml @@ -5,7 +5,7 @@ org.springframework.boot 
spring-boot-starter-parent - 3.5.5 + 4.0.0 no.uio.ifi @@ -18,11 +18,6 @@ - - com.google.code.gson - gson - 2.13.1 - org.springframework.boot spring-boot-starter-data-jpa @@ -35,6 +30,10 @@ org.springframework.boot spring-boot-starter-amqp + + org.springframework.boot + spring-boot-starter-gson + org.postgresql @@ -84,12 +83,12 @@ io.minio minio - 8.5.17 + 8.6.0 net.logstash.logback logstash-logback-encoder - 8.1 + 9.0 org.springframework.boot @@ -110,7 +109,7 @@ no.elixir crypt4gh - 3.0.33 + 3.0.38 org.slf4j @@ -121,7 +120,7 @@ no.elixir clearinghouse - 3.0.8 + 3.0.12 org.slf4j @@ -134,6 +133,11 @@ snakeyaml 2.5 + + com.squareup.okhttp3 + okhttp-jvm + 5.3.2 + diff --git a/sda-download/.github/integration/tests/common/50_check_endpoint.sh b/sda-download/.github/integration/tests/common/50_check_endpoint.sh index 5a985fc8d..eb10a9ac3 100755 --- a/sda-download/.github/integration/tests/common/50_check_endpoint.sh +++ b/sda-download/.github/integration/tests/common/50_check_endpoint.sh @@ -29,11 +29,10 @@ fi echo "Head method health endpoint is ok" - # ------------------ # Test empty token -check_401=$(curl -o /dev/null -s -w "%{http_code}\n" -X GET --cacert certs/ca.pem https://localhost:8443/metadata/datasets) +check_401=$(curl -o /dev/null -s -w "%{http_code}\n" -X GET --cacert certs/ca.pem -H "SDA-Client-Version: v0.3.0" https://localhost:8443/metadata/datasets) if [ "$check_401" != "401" ]; then echo "no token provided should give 401" @@ -43,7 +42,7 @@ fi echo "got correct response when no token provided" -check_405=$(curl -o /dev/null -s -w "%{http_code}\n" -X POST --cacert certs/ca.pem https://localhost:8443/metadata/datasets) +check_405=$(curl -o /dev/null -s -w "%{http_code}\n" -X POST --cacert certs/ca.pem -H "SDA-Client-Version: v0.3.0" https://localhost:8443/metadata/datasets) if [ "$check_405" != "405" ]; then echo "POST should not be allowed" @@ -58,9 +57,52 @@ echo "got correct response when POST method used" token=$(curl -s --cacert certs/ca.pem 
"https://localhost:8000/tokens" | jq -r '.[0]') +# Test Client Version Header +# We assume the app is configured to require a minimum version (v0.2.0). + +# Fail - missing header (expected 412 Precondition Failed) +check_missing_header=$(curl -o /dev/null -s -w "%{http_code}\n" -X GET --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:8443/metadata/datasets") + +if [ "$check_missing_header" != "412" ]; then + echo "Client Version Test FAIL: missing header should return 412" + echo "got: ${check_missing_header}" + exit 1 +fi +echo "Client Version Test OK: Missing header correctly returns 412" + +# Fail - insufficient version (e.g., v0.1.0, Expected 412) +check_insufficient_version=$(curl -o /dev/null -s -w "%{http_code}\n" -X GET --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.1.0" "https://localhost:8443/metadata/datasets") + +if [ "$check_insufficient_version" != "412" ]; then + echo "Client Version Test FAIL: insufficient version (v0.1.0) should return 412" + echo "got: ${check_insufficient_version}" + exit 1 +fi +echo "Client Version Test OK: Insufficient version (v0.1.0) correctly returns 412" + +# Success - sufficient version (e.g., v0.2.0, Expected 200) +check_sufficient_version=$(curl -o /dev/null -s -w "%{http_code}\n" -X GET --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.2.0" "https://localhost:8443/metadata/datasets") + +if [ "$check_sufficient_version" != "200" ]; then + echo "Client Version Test FAIL: sufficient version (v0.2.0) should pass version check and return 200" + echo "got: ${check_sufficient_version}" + exit 1 +fi +echo "Client Version Test OK: sufficient version (v0.2.0) correctly returns 200" + +# Success - newer version (e.g., v1.0.0, Expected 200) +check_newer_version=$(curl -o /dev/null -s -w "%{http_code}\n" -X GET --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v1.0.0" "https://localhost:8443/metadata/datasets") 
+ +if [ "$check_newer_version" != "200" ]; then + echo "Client Version Test FAIL: Newer version (v1.0.0) should pass version check and return 200" + echo "got: ${check_newer_version}" + exit 1 +fi +echo "Client Version Test OK: Newer version (v1.0.0) correctly returns 200" + ## Test datasets endpoint -check_dataset=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" https://localhost:8443/metadata/datasets | jq -r '.[0]') +check_dataset=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" https://localhost:8443/metadata/datasets | jq -r '.[0]') if [ "$check_dataset" != "https://doi.example/ty009.sfrrss/600.45asasga" ]; then echo "dataset https://doi.example/ty009.sfrrss/600.45asasga not found" @@ -72,7 +114,7 @@ echo "expected dataset found" ## Test datasets/files endpoint -check_files=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:8443/metadata/datasets/https://doi.example/ty009.sfrrss/600.45asasga/files" | jq -r '.[0].fileId') +check_files=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:8443/metadata/datasets/https://doi.example/ty009.sfrrss/600.45asasga/files" | jq -r '.[0].fileId') if [ "$check_files" != "urn:neic:001-002" ]; then echo "file with id urn:neic:001-002 not found" @@ -90,7 +132,7 @@ export C4GH_PASSPHRASE crypt4gh decrypt -s c4gh.sec.pem -f dummy_data.c4gh && mv dummy_data old-file.txt -curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:9443/files/urn:neic:001-002" --output test-download.txt +curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/files/urn:neic:001-002" --output test-download.txt cmp --silent old-file.txt test-download.txt status=$? 
@@ -101,7 +143,7 @@ else exit 1 fi -curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:9443/files/urn:neic:001-002?startCoordinate=0&endCoordinate=2" --output test-part.txt +curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/files/urn:neic:001-002?startCoordinate=0&endCoordinate=2" --output test-part.txt dd if=old-file.txt ibs=1 skip=0 count=2 > old-part.txt @@ -114,7 +156,7 @@ else exit 1 fi -curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:9443/files/urn:neic:001-002?startCoordinate=7&endCoordinate=14" --output test-part2.txt +curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/files/urn:neic:001-002?startCoordinate=7&endCoordinate=14" --output test-part2.txt dd if=old-file.txt ibs=1 skip=7 count=7 > old-part2.txt @@ -127,7 +169,7 @@ else exit 1 fi -curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:9443/files/urn:neic:001-002?startCoordinate=70000&endCoordinate=140000" --output test-part3.txt +curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/files/urn:neic:001-002?startCoordinate=70000&endCoordinate=140000" --output test-part3.txt dd if=old-file.txt ibs=1 skip=70000 count=70000 > old-part3.txt @@ -142,7 +184,7 @@ fi # test that downloads of decrypted files from a download instance that # serves only encrypted files (here running at port 8443) should fail -curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:8443/files/urn:neic:001-002" --output test-download-fail.txt +curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:8443/files/urn:neic:001-002" --output test-download-fail.txt if ! 
grep -q "downloading unencrypted data is not supported" test-download-fail.txt; then echo "got unexpected response when trying to download unencrypted data from encrypted endpoint" @@ -156,7 +198,7 @@ token=$(curl -s --cacert certs/ca.pem "https://localhost:8000/tokens" | jq -r ' ## Test datasets endpoint -check_empty_token=$(curl -o /dev/null -s -w "%{http_code}\n" -X GET -I --cacert certs/ca.pem -H "Authorization: Bearer $token" https://localhost:8443/metadata/datasets) +check_empty_token=$(curl -o /dev/null -s -w "%{http_code}\n" -X GET -I --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" https://localhost:8443/metadata/datasets) if [ "$check_empty_token" != "200" ]; then echo "response for empty token is not 200" @@ -174,7 +216,7 @@ token=$(curl -s --cacert certs/ca.pem "https://localhost:8000/tokens" | jq -r ' ## Test datasets endpoint -check_empty_token=$(curl -o /dev/null -s -w "%{http_code}\n" -X GET -I --cacert certs/ca.pem -H "Authorization: Bearer $token" https://localhost:8443/metadata/datasets) +check_empty_token=$(curl -o /dev/null -s -w "%{http_code}\n" -X GET -I --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" https://localhost:8443/metadata/datasets) if [ "$check_empty_token" != "200" ]; then echo "response for token with untrusted sources is not 200" diff --git a/sda-download/.github/integration/tests/common/70_check_download.sh b/sda-download/.github/integration/tests/common/70_check_download.sh index 2e5c320c8..f510065c4 100644 --- a/sda-download/.github/integration/tests/common/70_check_download.sh +++ b/sda-download/.github/integration/tests/common/70_check_download.sh @@ -15,7 +15,7 @@ C4GH_PASSPHRASE=$(yq .c4gh.passphrase config.yaml) export C4GH_PASSPHRASE # download decrypted full file, check file size -curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:9443/s3/$dataset/$file" --output full1.bam +curl -s --cacert certs/ca.pem -H 
"Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/s3/$dataset/$file" --output full1.bam file_size=$(stat -c %s full1.bam) # Get the size of the file if [ "$file_size" -ne "$expected_size" ]; then @@ -24,7 +24,7 @@ if [ "$file_size" -ne "$expected_size" ]; then fi # test that start, end=0 returns the whole file -curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:9443/s3/$dataset/$file?startCoordinate=0&endCoordinate=0" --output full2.bam +curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/s3/$dataset/$file?startCoordinate=0&endCoordinate=0" --output full2.bam if ! cmp --silent full1.bam full2.bam; then echo "Full decrypted files, with and without coordinates, are different" diff --git a/sda-download/.github/integration/tests/common/80_check_reencrypt.sh b/sda-download/.github/integration/tests/common/80_check_reencrypt.sh index 8354a8e8f..2f90949e3 100644 --- a/sda-download/.github/integration/tests/common/80_check_reencrypt.sh +++ b/sda-download/.github/integration/tests/common/80_check_reencrypt.sh @@ -19,7 +19,7 @@ file="dummy_data" expected_size=1048605 # Download unencrypted full file (from download service at port 9443), check file size -curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:9443/s3/$dataset/$file" --output full1.bam +curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/s3/$dataset/$file" --output full1.bam file_size=$(stat -c %s full1.bam) # Get the size of the file if [ "$file_size" -ne "$expected_size" ]; then @@ -30,7 +30,7 @@ fi # Test reencrypt the file header with the client public key clientkey=$(base64 -w0 client.pub.pem) reencryptedFile=reencrypted.bam.c4gh -curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" "https://localhost:8443/s3/$dataset/$file" 
--output $reencryptedFile +curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" -H "SDA-Client-Version: v0.3.0" "https://localhost:8443/s3/$dataset/$file" --output $reencryptedFile expected_encrypted_size=1049205 file_size=$(stat -c %s $reencryptedFile) @@ -55,7 +55,7 @@ fi # download reencrypted partial file, check file size partReencryptedFile=part1.bam.c4gh -curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" "https://localhost:8443/s3/$dataset/$file?startCoordinate=0&endCoordinate=1000" --output $partReencryptedFile +curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" -H "SDA-Client-Version: v0.3.0" "https://localhost:8443/s3/$dataset/$file?startCoordinate=0&endCoordinate=1000" --output $partReencryptedFile file_size=$(stat -c %s $partReencryptedFile) # Get the size of the file part_expected_size=65688 diff --git a/sda-download/.github/integration/tests/common/90_check_s3_errors.sh b/sda-download/.github/integration/tests/common/90_check_s3_errors.sh index a0413a246..0c5cefe13 100644 --- a/sda-download/.github/integration/tests/common/90_check_s3_errors.sh +++ b/sda-download/.github/integration/tests/common/90_check_s3_errors.sh @@ -22,39 +22,39 @@ bad_token=BADeyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJyZXF1ZXN0ZXJAZGVtby # Test error codes and error messages returned to the user # try to download encrypted file without sending a public key -resp=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:8443/s3/$dataset/$file") +resp=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:8443/s3/$dataset/$file") if ! 
echo "$resp" | grep -q "c4gh public key is missing from the header"; then echo "Incorrect response, expected 'c4gh public key is missing from the header' got $resp" exit 1 fi -resp=$(curl --cacert certs/ca.pem -H "Authorization: Bearer $token" "https://localhost:8443/s3/$dataset/$file" -s -o /dev/null -w "%{http_code}") +resp=$(curl --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "https://localhost:8443/s3/$dataset/$file" -s -o /dev/null -w "%{http_code}") if [ "$resp" -ne 400 ]; then echo "Incorrect response with missing public key, expected 400 got $resp" exit 1 fi # try to download encrypted file with a bad public key -resp=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: YmFkIGtleQ==" "https://localhost:8443/s3/$dataset/$file") +resp=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: YmFkIGtleQ==" -H "SDA-Client-Version: v0.3.0" "https://localhost:8443/s3/$dataset/$file") if ! 
echo "$resp" | grep -q "file re-encryption error"; then echo "Incorrect response, expected 'file re-encryption error' got $resp" exit 1 fi -resp=$(curl --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: YmFkIGtleQ==" "https://localhost:8443/s3/$dataset/$file" -s -o /dev/null -w "%{http_code}") +resp=$(curl --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: YmFkIGtleQ==" -H "SDA-Client-Version: v0.3.0" "https://localhost:8443/s3/$dataset/$file" -s -o /dev/null -w "%{http_code}") if [ "$resp" -ne 500 ]; then echo "Incorrect response with missing public key, expected 500 got $resp" fi # try to download encrypted file from instance that serves unencrypted files -resp=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" "https://localhost:9443/s3/$dataset/$file") +resp=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/s3/$dataset/$file") if ! 
echo "$resp" | grep -q "downloading encrypted data is not supported"; then echo "Incorrect response, expected 'downloading encrypted data is not supported' got $resp" exit 1 fi -resp=$(curl --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" "https://localhost:9443/s3/$dataset/$file" -s -o /dev/null -w "%{http_code}") +resp=$(curl --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/s3/$dataset/$file" -s -o /dev/null -w "%{http_code}") if [ "$resp" -ne 400 ]; then echo "Incorrect response, expected 400 got $resp" exit 1 @@ -63,13 +63,13 @@ fi # try to download a file the user doesn't have access to -resp=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $bad_token" -H "Client-Public-Key: $clientkey" "https://localhost:8443/s3/$dataset/$file") +resp=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $bad_token" -H "Client-Public-Key: $clientkey" -H "SDA-Client-Version: v0.3.0" "https://localhost:8443/s3/$dataset/$file") if ! 
echo "$resp" | grep -q "get visas failed"; then echo "Incorrect response, expected 'get visas failed' got $resp" exit 1 fi -resp=$(curl --cacert certs/ca.pem -H "Authorization: Bearer $bad_token" -H "Client-Public-Key: $clientkey" "https://localhost:8443/s3/$dataset/$file" -s -o /dev/null -w "%{http_code}") +resp=$(curl --cacert certs/ca.pem -H "Authorization: Bearer $bad_token" -H "Client-Public-Key: $clientkey" -H "SDA-Client-Version: v0.3.0" "https://localhost:8443/s3/$dataset/$file" -s -o /dev/null -w "%{http_code}") if [ "$resp" -ne 401 ]; then echo "Incorrect response, expected 401 got $resp" exit 1 @@ -77,13 +77,13 @@ fi # try to download a file that does not exist -resp=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" "https://localhost:9443/s3/$dataset/nonexistentfile") +resp=$(curl -s --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/s3/$dataset/nonexistentfile") if [ -n "$resp" ]; then echo "Incorrect response, expected no error message, got $resp" exit 1 fi -resp=$(curl --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" "https://localhost:9443/s3/$dataset/nonexistentfile" -s -o /dev/null -w "%{http_code}") +resp=$(curl --cacert certs/ca.pem -H "Authorization: Bearer $token" -H "Client-Public-Key: $clientkey" -H "SDA-Client-Version: v0.3.0" "https://localhost:9443/s3/$dataset/nonexistentfile" -s -o /dev/null -w "%{http_code}") if [ "$resp" -ne 404 ]; then echo "Incorrect response, expected 404 got $resp" exit 1 diff --git a/sda-download/.github/integration/tests/s3notls/52_check_endpoint.sh b/sda-download/.github/integration/tests/s3notls/52_check_endpoint.sh index a065a2ec3..e70c08f63 100644 --- a/sda-download/.github/integration/tests/s3notls/52_check_endpoint.sh +++ b/sda-download/.github/integration/tests/s3notls/52_check_endpoint.sh @@ -18,7 +18,7 @@ echo "Health 
endpoint is ok" # ------------------ # Test empty token -check_401=$(curl -o /dev/null -s -w "%{http_code}\n" http://localhost:8080/metadata/datasets) +check_401=$(curl -o /dev/null -s -w "%{http_code}\n" -H "SDA-Client-Version: v0.3.0" http://localhost:8080/metadata/datasets) if [ "$check_401" != "401" ]; then echo "no token provided should give 401" @@ -28,7 +28,7 @@ fi echo "got correct response when no token provided" -check_405=$(curl -X POST -o /dev/null -s -w "%{http_code}\n" http://localhost:8080/metadata/datasets ) +check_405=$(curl -X POST -o /dev/null -s -w "%{http_code}\n" -H "SDA-Client-Version: v0.3.0" http://localhost:8080/metadata/datasets) if [ "$check_405" != "405" ]; then echo "POST should not be allowed" @@ -45,7 +45,7 @@ token=$(curl -s "http://localhost:8000/tokens" | jq -r '.[0]') ## Test datasets endpoint -check_dataset=$(curl -s -H "Authorization: Bearer $token" http://localhost:8080/metadata/datasets | jq -r '.[0]') +check_dataset=$(curl -s -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" http://localhost:8080/metadata/datasets | jq -r '.[0]') if [ "$check_dataset" != "https://doi.example/ty009.sfrrss/600.45asasga" ]; then echo "dataset https://doi.example/ty009.sfrrss/600.45asasga not found" @@ -57,7 +57,7 @@ echo "expected dataset found" ## Test datasets/files endpoint -check_files=$(curl -s -H "Authorization: Bearer $token" "http://localhost:8080/metadata/datasets/https://doi.example/ty009.sfrrss/600.45asasga/files" | jq -r '.[0].fileId') +check_files=$(curl -s -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "http://localhost:8080/metadata/datasets/https://doi.example/ty009.sfrrss/600.45asasga/files" | jq -r '.[0].fileId') if [ "$check_files" != "urn:neic:001-002" ]; then echo "file with id urn:neic:001-002 not found" @@ -76,7 +76,7 @@ export C4GH_PASSPHRASE crypt4gh decrypt -s c4gh.sec.pem -f dummy_data.c4gh && mv dummy_data old-file.txt # first try downloading from download instance serving encrypted 
data, should fail -curl -s -H "Authorization: Bearer $token" "http://localhost:8080/files/urn:neic:001-002" --output test-download.txt +curl -s -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "http://localhost:8080/files/urn:neic:001-002" --output test-download.txt if ! grep -q "downloading unencrypted data is not supported" "test-download.txt"; then echo "wrong response when trying to download unencrypted data from encrypted endpoint" @@ -84,7 +84,7 @@ if ! grep -q "downloading unencrypted data is not supported" "test-download.txt" fi # now try downloading from download instance serving unencrypted data -curl -s -H "Authorization: Bearer $token" "http://localhost:9080/files/urn:neic:001-002" --output test-download.txt +curl -s -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "http://localhost:9080/files/urn:neic:001-002" --output test-download.txt cmp --silent old-file.txt test-download.txt @@ -96,7 +96,7 @@ else fi # downloading from download instance serving unencrypted data -curl -s -H "Authorization: Bearer $token" "http://localhost:9080/files/urn:neic:001-002?startCoordinate=0&endCoordinate=2" --output test-part.txt +curl -s -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "http://localhost:9080/files/urn:neic:001-002?startCoordinate=0&endCoordinate=2" --output test-part.txt dd if=old-file.txt ibs=1 skip=0 count=2 > old-part.txt @@ -110,7 +110,7 @@ else fi # downloading from download instance serving unencrypted data -curl -s -H "Authorization: Bearer $token" "http://localhost:9080/files/urn:neic:001-002?startCoordinate=7&endCoordinate=14" --output test-part2.txt +curl -s -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" "http://localhost:9080/files/urn:neic:001-002?startCoordinate=7&endCoordinate=14" --output test-part2.txt dd if=old-file.txt ibs=1 skip=7 count=7 > old-part2.txt @@ -130,7 +130,7 @@ token=$(curl -s "http://localhost:8000/tokens" | jq -r '.[1]') ## Test datasets endpoint 
-check_empty_token=$(curl -o /dev/null -s -w "%{http_code}\n" -H "Authorization: Bearer $token" http://localhost:8080/metadata/datasets) +check_empty_token=$(curl -o /dev/null -s -w "%{http_code}\n" -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" http://localhost:8080/metadata/datasets) if [ "$check_empty_token" != "200" ]; then echo "response for empty token is not 200" @@ -148,7 +148,7 @@ token=$(curl -s "http://localhost:8000/tokens" | jq -r '.[2]') ## Test datasets endpoint -check_dataset=$(curl -s -H "Authorization: Bearer $token" http://localhost:8080/metadata/datasets | jq -r '.[0]') +check_dataset=$(curl -s -H "Authorization: Bearer $token" -H "SDA-Client-Version: v0.3.0" http://localhost:8080/metadata/datasets | jq -r '.[0]') if [ "$check_dataset" != "https://doi.example/ty009.sfrrss/600.45asasga" ]; then echo "dataset https://doi.example/ty009.sfrrss/600.45asasga not found" diff --git a/sda-download/api/api.go b/sda-download/api/api.go index ae1a4c8dd..1c8f1816f 100644 --- a/sda-download/api/api.go +++ b/sda-download/api/api.go @@ -9,18 +9,25 @@ import ( "time" "github.com/gin-gonic/gin" + "github.com/neicnordic/sda-download/api/middleware" "github.com/neicnordic/sda-download/api/s3" "github.com/neicnordic/sda-download/api/sda" "github.com/neicnordic/sda-download/internal/config" log "github.com/sirupsen/logrus" ) -// SelectedMiddleware is used to control authentication and authorization -// behaviour with config app.middleware -// available middlewares: -// "default" for TokenMiddleware -var SelectedMiddleware = func() gin.HandlerFunc { - return nil +// SelectedMiddleware returns the middleware chain based on configuration. +// For example, config.Config.App.Middleware could be "default", "token-clientversion", etc. 
+var SelectedMiddleware = func() []gin.HandlerFunc { + switch strings.ToLower(config.Config.App.Middleware) { + case "token-clientversion": + return []gin.HandlerFunc{ + middleware.TokenMiddleware(), + middleware.ClientVersionMiddleware(), + } + default: + return []gin.HandlerFunc{middleware.TokenMiddleware()} + } } // healthResponse @@ -65,12 +72,14 @@ func Setup() *http.Server { } router.HandleMethodNotAllowed = true + // protected endpoints + router.GET("/metadata/datasets", append(SelectedMiddleware(), sda.Datasets)...) + router.GET("/metadata/datasets/*dataset", append(SelectedMiddleware(), sda.Files)...) + router.GET("/files/:fileid", append(SelectedMiddleware(), sda.Download)...) + router.GET("/s3/*path", append(SelectedMiddleware(), s3.Download)...) + router.HEAD("/s3/*path", append(SelectedMiddleware(), s3.Download)...) - router.GET("/metadata/datasets", SelectedMiddleware(), sda.Datasets) - router.GET("/metadata/datasets/*dataset", SelectedMiddleware(), sda.Files) - router.GET("/files/:fileid", SelectedMiddleware(), sda.Download) - router.GET("/s3/*path", SelectedMiddleware(), s3.Download) - router.HEAD("/s3/*path", SelectedMiddleware(), s3.Download) + // public endpoints router.GET("/health", healthResponse) router.HEAD("/", healthResponse) diff --git a/sda-download/api/api.md b/sda-download/api/api.md index 8f21c13dd..533a4dbf1 100644 --- a/sda-download/api/api.md +++ b/sda-download/api/api.md @@ -59,25 +59,19 @@ For example, given a dataset name `https://doi.org/abc/123`, one can do `GET /me [ { "fileId": "urn:file:1", - "datasetId": "dataset_1", "displayFileName": "file_1.txt.c4gh", - "fileName": "hash", - "fileSize": 60, + "filePath": "directory/file_1.txt.c4gh", "decryptedFileSize": 32, "decryptedFileChecksum": "hash", "decryptedFileChecksumType": "SHA256", - "fileStatus": "READY" }, { "fileId": "urn:file:2", - "datasetId": "dataset_1", "displayFileName": "file_2.txt.c4gh", - "fileName": "hash", - "fileSize": 60, + "filePath": 
"directory/file_2.txt.c4gh", "decryptedFileSize": 32, "decryptedFileChecksum": "hash", "decryptedFileChecksumType": "SHA256", - "fileStatus": "READY" }, ] ``` diff --git a/sda-download/api/middleware/middleware.go b/sda-download/api/middleware/middleware.go index de6a9846d..94f6106c4 100644 --- a/sda-download/api/middleware/middleware.go +++ b/sda-download/api/middleware/middleware.go @@ -1,8 +1,10 @@ package middleware import ( + "fmt" "net/http" + "github.com/Masterminds/semver/v3" "github.com/gin-gonic/gin" "github.com/neicnordic/sda-download/internal/config" "github.com/neicnordic/sda-download/internal/session" @@ -85,6 +87,50 @@ func TokenMiddleware() gin.HandlerFunc { } } +// ClientVersionMiddleware checks for the required "SDA-Client-Version" header. +// It aborts the request with 412 (Precondition Failed) if the header is missing or +// if the version does not meet the minimum required version. +func ClientVersionMiddleware() gin.HandlerFunc { + return func(c *gin.Context) { + clientVersionStr := c.GetHeader("SDA-Client-Version") + + // Check if the header is present + if clientVersionStr == "" { + log.Debugln("request blocked (412): Missing client version header in request") + c.String(http.StatusPreconditionFailed, "Missing client version header in request") + c.Abort() + + return + } + + // Parse the client's provided version (using the processed string) + clientVersion, err := semver.NewVersion(clientVersionStr) + if err != nil { + log.Debugf("client version header '%s' is not a valid semantic version: %v", clientVersionStr, err) + c.String(http.StatusPreconditionFailed, "client version header is not a valid semantic version") + c.Abort() + + return + } + + // Check if the client version is sufficient (clientVersion >= minimalVersion) + if clientVersion.LessThan(config.Config.App.MinimalCliVersion) { + errorMessage := fmt.Sprintf("Error: Your sda-cli client version is outdated, please update to at least version '%s'.", 
config.Config.App.MinimalCliVersionStr) + log.Debugf("request blocked (412): outdated client version '%s'. Required minimum '%s'", clientVersionStr, config.Config.App.MinimalCliVersionStr) + c.String(http.StatusPreconditionFailed, errorMessage) + c.Abort() + + return + } + + // Version is correct, proceed to the next handler/middleware + log.Debugf("client version check passed: %s", clientVersionStr) + + // Forward request to the next endpoint handler + c.Next() + } +} + // GetCacheFromContext is a helper function that endpoints can use to get data // stored to the *current* request context (not the session storage). // The request context was populated by the middleware, which in turn uses the session storage. diff --git a/sda-download/api/middleware/middleware_test.go b/sda-download/api/middleware/middleware_test.go index ed680004d..ffdcd35c0 100644 --- a/sda-download/api/middleware/middleware_test.go +++ b/sda-download/api/middleware/middleware_test.go @@ -7,8 +7,10 @@ import ( "net/http" "net/http/httptest" "reflect" + "strings" "testing" + "github.com/Masterminds/semver/v3" "github.com/gin-gonic/gin" "github.com/neicnordic/sda-download/internal/config" "github.com/neicnordic/sda-download/internal/session" @@ -303,3 +305,105 @@ func TestGetDatasets(t *testing.T) { t.Errorf("TestStoreDatasets failed, got %s, expected %s", storedDatasets, datasets) } } + +func TestClientVersionMiddleware(t *testing.T) { + originalMinimalCliVersion := config.Config.App.MinimalCliVersion + defer func() { + config.Config.App.MinimalCliVersion = originalMinimalCliVersion + }() + + tests := []struct { + name string + clientVersionHeader string + configMinimalVersion string + expectedStatus int + expectedBodyContains string + }{ + { + name: "Fail_MissingHeader", + clientVersionHeader: "", + configMinimalVersion: "v0.2.0", + expectedStatus: http.StatusPreconditionFailed, // 412 + expectedBodyContains: "Missing client version header in request", + }, + { + name: 
"Fail_InvalidClientSemVer", + clientVersionHeader: "v-invalid-1", + configMinimalVersion: "v0.2.0", + expectedStatus: http.StatusPreconditionFailed, // 412 + expectedBodyContains: "is not a valid semantic version", + }, + { + name: "Fail_InsufficientVersion", + clientVersionHeader: "v0.1.9", + configMinimalVersion: "v0.2.0", + expectedStatus: http.StatusPreconditionFailed, // 412 + expectedBodyContains: "is outdated, please update to at least version 'v0.2.0'", + }, + { + name: "Success_EqualVersion", + clientVersionHeader: "v0.2.0", + configMinimalVersion: "v0.2.0", + expectedStatus: http.StatusOK, // 200 + expectedBodyContains: "", + }, + { + name: "Success_NewerVersion", + clientVersionHeader: "v0.3.0", + configMinimalVersion: "v0.2.0", + expectedStatus: http.StatusOK, // 200 + expectedBodyContains: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Setup + w := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/", nil) + _, router := gin.CreateTestContext(w) + + config.Config.App.MinimalCliVersionStr = tt.configMinimalVersion + // Set the configuration mock by parsing the string into the required SemVer object + parsedVersion, err := semver.NewVersion(tt.configMinimalVersion) + if err != nil { + t.Fatalf("Test setup error: Failed to parse minimal version '%s': %v", tt.configMinimalVersion, err) + } + config.Config.App.MinimalCliVersion = parsedVersion + + if tt.clientVersionHeader != "" { + r.Header.Set("SDA-Client-Version", tt.clientVersionHeader) + } + + // Define a dummy handler to check if the middleware allowed passage + var passed bool + dummyHandler := func(c *gin.Context) { + passed = true + c.Status(http.StatusOK) // Explicitly set OK status if allowed to pass + } + + // Send request through the middleware + router.GET("/", ClientVersionMiddleware(), dummyHandler) + router.ServeHTTP(w, r) + + // Assertion 1: Check Status Code + if w.Code != tt.expectedStatus { + t.Errorf("status code mismatch.\nGot: %d\nWant: 
%d", w.Code, tt.expectedStatus) + } + + // Assertion 2: Check Body Content for Failures + body := w.Body.String() + if tt.expectedStatus != http.StatusOK && !strings.Contains(body, tt.expectedBodyContains) { + t.Errorf("response body mismatch.\nGot Body: %s\nWant Body to contain: %s", body, tt.expectedBodyContains) + } + + // Assertion 3: Check if the request was allowed to pass (only for success cases) + if tt.expectedStatus == http.StatusOK && !passed { + t.Error("success case failed: Middleware unexpectedly blocked the request.") + } + if tt.expectedStatus != http.StatusOK && passed { + t.Error("failure case failed: Middleware unexpectedly allowed the request to pass.") + } + }) + } +} diff --git a/sda-download/api/s3/s3.go b/sda-download/api/s3/s3.go index 35cb03667..efd4734ba 100644 --- a/sda-download/api/s3/s3.go +++ b/sda-download/api/s3/s3.go @@ -185,13 +185,13 @@ func ListObjects(c *gin.Context) { }) } -func getFileInfo(c *gin.Context) (fileInfo *database.FileInfo, err error) { +func getFileStableID(c *gin.Context) (fileStableID string, err error) { // Get file info for the given file path (or abort) filename := c.Param("filename") if !strings.HasSuffix(c.Param("filename"), ".c4gh") { filename = c.Param("filename") + ".c4gh" } - fileInfo, err = database.GetDatasetFileInfo(c.Param("dataset"), filename) + fileStableID, err = database.GetDatasetFileStableID(c.Param("dataset"), filename) if err != nil { if err.Error() == "sql: no rows in result set" { c.AbortWithStatus(http.StatusNotFound) @@ -202,7 +202,7 @@ func getFileInfo(c *gin.Context) (fileInfo *database.FileInfo, err error) { return } - return fileInfo, nil + return fileStableID, nil } // GetObject respondes to an S3 GetObject request. 
This request returns S3 @@ -213,7 +213,7 @@ func getFileInfo(c *gin.Context) (fileInfo *database.FileInfo, err error) { func GetObject(c *gin.Context) { log.Debugf("S3 GetObject request, context: %v", c.Params) - fileInfo, err := getFileInfo(c) + fileStableID, err := getFileStableID(c) if err != nil { return } @@ -222,7 +222,7 @@ func GetObject(c *gin.Context) { c.Set("S3", true) // set the fileID so that download knows what file to download - c.Params = append(c.Params, gin.Param{Key: "fileid", Value: fileInfo.FileID}) + c.Params = append(c.Params, gin.Param{Key: "fileid", Value: fileStableID}) // Download the file sda.Download(c) @@ -236,7 +236,7 @@ func GetObject(c *gin.Context) { func GetEcnryptedObject(c *gin.Context) { log.Debugf("S3 GetEncryptedObject request, context: %v", c.Params) - fileInfo, err := getFileInfo(c) + fileStableID, err := getFileStableID(c) if err != nil { return } @@ -245,7 +245,7 @@ func GetEcnryptedObject(c *gin.Context) { c.Set("S3", true) // set the fileID so that download knows what file to download - c.Params = append(c.Params, gin.Param{Key: "fileid", Value: fileInfo.FileID}) + c.Params = append(c.Params, gin.Param{Key: "fileid", Value: fileStableID}) // set the encrypted parameter so that download gets the encrypted file instead c.Params = append(c.Params, gin.Param{Key: "type", Value: "encrypted"}) diff --git a/sda-download/api/s3/s3_test.go b/sda-download/api/s3/s3_test.go index 5c05a05af..ab9f11290 100644 --- a/sda-download/api/s3/s3_test.go +++ b/sda-download/api/s3/s3_test.go @@ -117,12 +117,8 @@ func (ts *S3TestSuite) TestListByPrefix() { // Setup a mock database to handle queries fileInfo := &database.FileInfo{ FileID: "file1", - DatasetID: "dataset1", DisplayFileName: "file.txt", FilePath: "dir/file.txt", - EncryptedFileSize: 60, - EncryptedFileChecksum: "hash", - EncryptedFileChecksumType: "sha256", DecryptedFileSize: 32, DecryptedFileChecksum: "hash", DecryptedFileChecksumType: "sha256", @@ -131,35 +127,27 @@ func (ts 
*S3TestSuite) TestListByPrefix() { userID := "user1" query := ` - SELECT files.stable_id AS id, - datasets.stable_id AS dataset_id, - reverse\(split_part\(reverse\(files.submission_file_path::text\), '/'::text, 1\)\) AS display_file_name, - files.submission_user AS user_id, - files.submission_file_path AS file_path, - files.archive_file_size AS file_size, - lef.archive_file_checksum AS encrypted_file_checksum, - lef.archive_file_checksum_type AS encrypted_file_checksum_type, - files.decrypted_file_size, - sha.checksum AS decrypted_file_checksum, - sha.type AS decrypted_file_checksum_type - FROM sda.files - JOIN sda.file_dataset ON file_id = files.id - JOIN sda.datasets ON file_dataset.dataset_id = datasets.id - LEFT JOIN local_ega.files lef ON files.stable_id = lef.stable_id - LEFT JOIN \(SELECT file_id, \(ARRAY_AGG\(event ORDER BY started_at DESC\)\)\[1\] AS event FROM sda.file_event_log GROUP BY file_id\) log ON files.id = log.file_id - LEFT JOIN \(SELECT file_id, checksum, type FROM sda.checksums WHERE source = 'UNENCRYPTED'\) sha ON files.id = sha.file_id - WHERE datasets.stable_id = \$1; - ` +SELECT files.stable_id AS id, + reverse\(split_part\(reverse\(files.submission_file_path::text\), '/'::text, 1\)\) AS display_file_name, + files.submission_user AS user_id, + files.submission_file_path AS file_path, + files.decrypted_file_size, + sha_unenc.checksum AS decrypted_file_checksum, + sha_unenc.type AS decrypted_file_checksum_type +FROM sda.files + JOIN sda.file_dataset file_dataset ON file_dataset.file_id = files.id + JOIN sda.datasets datasets ON file_dataset.dataset_id = datasets.id + LEFT JOIN sda.checksums sha_unenc ON files.id = sha_unenc.file_id AND sha_unenc.source = 'UNENCRYPTED' +WHERE datasets.stable_id = \$1;` + ts.Mock.ExpectQuery(query). WithArgs("dataset1"). 
- WillReturnRows(sqlmock.NewRows([]string{"file_id", "dataset_id", - "display_file_name", "user_id", "file_path", "file_size", - "decrypted_file_checksum", "decrypted_file_checksum_type", + WillReturnRows(sqlmock.NewRows([]string{"file_id", + "display_file_name", "user_id", "file_path", "decrypted_file_size", "decrypted_file_checksum", - "decrypted_file_checksum_type"}).AddRow(fileInfo.FileID, fileInfo.DatasetID, + "decrypted_file_checksum_type"}).AddRow(fileInfo.FileID, fileInfo.DisplayFileName, userID, fileInfo.FilePath, - fileInfo.EncryptedFileSize, fileInfo.EncryptedFileChecksum, fileInfo.EncryptedFileChecksumType, fileInfo.DecryptedFileSize, - fileInfo.DecryptedFileChecksum, fileInfo.DecryptedFileChecksumType)) + fileInfo.DecryptedFileSize, fileInfo.DecryptedFileChecksum, fileInfo.DecryptedFileChecksumType)) // Send a request through the middleware to get files for the dataset and // prefix @@ -194,12 +182,8 @@ func (ts *S3TestSuite) TestListObjects() { // Setup a mock database to handlequeries fileInfo := &database.FileInfo{ FileID: "file1", - DatasetID: "dataset1", DisplayFileName: "file.txt", FilePath: "dir/file.txt", - EncryptedFileSize: 60, - EncryptedFileChecksum: "hash", - EncryptedFileChecksumType: "sha256", DecryptedFileSize: 32, DecryptedFileChecksum: "hash", DecryptedFileChecksumType: "sha256", @@ -208,35 +192,27 @@ func (ts *S3TestSuite) TestListObjects() { userID := "user1" query := ` - SELECT files.stable_id AS id, - datasets.stable_id AS dataset_id, - reverse\(split_part\(reverse\(files.submission_file_path::text\), '/'::text, 1\)\) AS display_file_name, - files.submission_user AS user_id, - files.submission_file_path AS file_path, - files.archive_file_size AS file_size, - lef.archive_file_checksum AS encrypted_file_checksum, - lef.archive_file_checksum_type AS encrypted_file_checksum_type, - files.decrypted_file_size, - sha.checksum AS decrypted_file_checksum, - sha.type AS decrypted_file_checksum_type - FROM sda.files - JOIN sda.file_dataset 
ON file_id = files.id - JOIN sda.datasets ON file_dataset.dataset_id = datasets.id - LEFT JOIN local_ega.files lef ON files.stable_id = lef.stable_id - LEFT JOIN \(SELECT file_id, \(ARRAY_AGG\(event ORDER BY started_at DESC\)\)\[1\] AS event FROM sda.file_event_log GROUP BY file_id\) log ON files.id = log.file_id - LEFT JOIN \(SELECT file_id, checksum, type FROM sda.checksums WHERE source = 'UNENCRYPTED'\) sha ON files.id = sha.file_id - WHERE datasets.stable_id = \$1; - ` +SELECT files.stable_id AS id, + reverse\(split_part\(reverse\(files.submission_file_path::text\), '/'::text, 1\)\) AS display_file_name, + files.submission_user AS user_id, + files.submission_file_path AS file_path, + files.decrypted_file_size, + sha_unenc.checksum AS decrypted_file_checksum, + sha_unenc.type AS decrypted_file_checksum_type +FROM sda.files + JOIN sda.file_dataset file_dataset ON file_dataset.file_id = files.id + JOIN sda.datasets datasets ON file_dataset.dataset_id = datasets.id + LEFT JOIN sda.checksums sha_unenc ON files.id = sha_unenc.file_id AND sha_unenc.source = 'UNENCRYPTED' +WHERE datasets.stable_id = \$1;` + ts.Mock.ExpectQuery(query). WithArgs("dataset1"). 
- WillReturnRows(sqlmock.NewRows([]string{"file_id", "dataset_id", - "display_file_name", "user_id", "file_path", "file_size", - "encrypted_file_checksum", "encrypted_file_checksum_type", + WillReturnRows(sqlmock.NewRows([]string{"file_id", + "display_file_name", "user_id", "file_path", "decrypted_file_size", "decrypted_file_checksum", - "decrypted_file_checksum_type"}).AddRow(fileInfo.FileID, fileInfo.DatasetID, + "decrypted_file_checksum_type"}).AddRow(fileInfo.FileID, fileInfo.DisplayFileName, userID, fileInfo.FilePath, - fileInfo.EncryptedFileSize, fileInfo.EncryptedFileChecksum, fileInfo.EncryptedFileChecksumType, fileInfo.DecryptedFileSize, - fileInfo.DecryptedFileChecksum, fileInfo.DecryptedFileChecksumType)) + fileInfo.DecryptedFileSize, fileInfo.DecryptedFileChecksum, fileInfo.DecryptedFileChecksumType)) // Send a request through the middleware to get datasets @@ -303,7 +279,7 @@ func (ts *S3TestSuite) TestParseParams() { router.ServeHTTP(w, httptest.NewRequest("GET", params.Path, nil)) response := w.Result() - response.Body.Close() + _ = response.Body.Close() assert.Equal(ts.T(), http.StatusAccepted, response.StatusCode, "Request failed") } diff --git a/sda-download/api/sda/sda_test.go b/sda-download/api/sda/sda_test.go index 087ca86f3..516bae40d 100644 --- a/sda-download/api/sda/sda_test.go +++ b/sda-download/api/sda/sda_test.go @@ -270,12 +270,8 @@ func TestFiles_Success(t *testing.T) { getFiles = func(_ string, _ *gin.Context) ([]*database.FileInfo, int, error) { fileInfo := database.FileInfo{ FileID: "file1", - DatasetID: "dataset1", DisplayFileName: "file1.txt", FilePath: "dir/file1.txt", - EncryptedFileSize: 200, - EncryptedFileChecksum: "hash", - EncryptedFileChecksumType: "sha256", DecryptedFileSize: 100, DecryptedFileChecksum: "hash", DecryptedFileChecksumType: "sha256", @@ -303,11 +299,9 @@ func TestFiles_Success(t *testing.T) { body, _ := io.ReadAll(response.Body) expectedStatusCode := 200 expectedBody := []byte( - 
`[{"fileId":"file1","datasetId":"dataset1","displayFileName":"file1.txt","filePath":` + - `"dir/file1.txt","encryptedFileSize":200,` + - `"encryptedFileChecksum":"hash","encryptedFileChecksumType":"sha256",` + - `"decryptedFileSize":100,` + - `"decryptedFileChecksum":"hash","decryptedFileChecksumType":"sha256"}]`) + `[{"fileId":"file1","displayFileName":"file1.txt","filePath":` + + `"dir/file1.txt",` + + `"decryptedFileSize":100,"decryptedFileChecksum":"hash","decryptedFileChecksumType":"sha256"}]`) if response.StatusCode != expectedStatusCode { t.Errorf("TestDatasets failed, got %d expected %d", response.StatusCode, expectedStatusCode) @@ -752,7 +746,7 @@ func TestDownload_Whole_Range_Encrypted(t *testing.T) { privPEM := "-----BEGIN PRIVATE KEY-----\n" + base64.StdEncoding.EncodeToString(privdata) + "\n-----END PRIVATE KEY-----\n" _, err = keyfile.Write([]byte(privPEM)) assert.NoError(t, err, "Could not write private key") - keyfile.Close() + _ = keyfile.Close() certfile, err := os.CreateTemp("", "cert") assert.NoError(t, err, "Could not create temp file for cert") @@ -760,7 +754,7 @@ func TestDownload_Whole_Range_Encrypted(t *testing.T) { pubPEM := "-----BEGIN CERTIFICATE-----\n" + base64.StdEncoding.EncodeToString(server.Certificate().Raw) + "\n-----END CERTIFICATE-----\n" _, err = certfile.Write([]byte(pubPEM)) assert.NoError(t, err, "Could not write public key") - certfile.Close() + _ = certfile.Close() // Configure Reencrypt to use fake server config.Config.Reencrypt.Host = serverdetails[0] @@ -814,7 +808,7 @@ func TestDownload_Whole_Range_Encrypted(t *testing.T) { _, err = io.Copy(datafile, &bufferWriter) assert.NoError(t, err, "Could not write temporary file") - datafile.Close() + _ = datafile.Close() // Substitute mock functions database.CheckFilePermission = func(_ string) (string, error) { diff --git a/sda-download/cmd/main.go b/sda-download/cmd/main.go index 11734022d..2c55930b6 100644 --- a/sda-download/cmd/main.go +++ b/sda-download/cmd/main.go @@ 
-2,7 +2,6 @@ package main import ( "github.com/neicnordic/sda-download/api" - "github.com/neicnordic/sda-download/api/middleware" "github.com/neicnordic/sda-download/api/sda" "github.com/neicnordic/sda-download/internal/config" "github.com/neicnordic/sda-download/internal/database" @@ -24,12 +23,6 @@ func init() { } config.Config = *conf - // Set middleware - // nolint:gocritic // this nolint can be removed, if you have more than one middlewares available - switch conf.App.Middleware { //nolint:revive - default: - api.SelectedMiddleware = middleware.TokenMiddleware - } log.Infof("%s middleware selected", conf.App.Middleware) // Connect to database diff --git a/sda-download/dev_utils/config-notls.yaml b/sda-download/dev_utils/config-notls.yaml index f67d8d7f5..63f8e2e89 100644 --- a/sda-download/dev_utils/config-notls.yaml +++ b/sda-download/dev_utils/config-notls.yaml @@ -1,3 +1,7 @@ +app: + middleware: "token-clientversion" + minimalcliversion: "v0.2.0" + log: level: "debug" format: "json" diff --git a/sda-download/dev_utils/config.yaml b/sda-download/dev_utils/config.yaml index a75f9bfec..4dea553fb 100644 --- a/sda-download/dev_utils/config.yaml +++ b/sda-download/dev_utils/config.yaml @@ -3,7 +3,8 @@ app: servercert: "./dev_utils/certs/download.pem" serverkey: "./dev_utils/certs/download-key.pem" port: "8443" - middleware: "default" + middleware: "token-clientversion" + minimalcliversion: "v0.2.0" log: level: "debug" diff --git a/sda-download/go.mod b/sda-download/go.mod index be1582d70..1ce8f5894 100644 --- a/sda-download/go.mod +++ b/sda-download/go.mod @@ -4,9 +4,10 @@ go 1.24.1 require ( github.com/DATA-DOG/go-sqlmock v1.5.2 + github.com/Masterminds/semver/v3 v3.4.0 github.com/aws/aws-sdk-go v1.55.8 github.com/dgraph-io/ristretto v1.0.0 - github.com/gin-gonic/gin v1.10.1 + github.com/gin-gonic/gin v1.11.0 github.com/google/uuid v1.6.0 github.com/johannesboyne/gofakes3 v0.0.0-20240914150247-80d9d7b1b250 github.com/lestrrat-go/jwx/v2 v2.1.6 @@ -15,18 +16,18 @@ 
require ( github.com/sirupsen/logrus v1.9.3 github.com/spf13/viper v1.21.0 github.com/stretchr/testify v1.11.1 - golang.org/x/crypto v0.42.0 + golang.org/x/crypto v0.45.0 golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 - google.golang.org/grpc v1.75.0 - google.golang.org/protobuf v1.36.8 + google.golang.org/grpc v1.77.0 + google.golang.org/protobuf v1.36.10 ) require ( filippo.io/edwards25519 v1.1.0 // indirect - github.com/bytedance/sonic v1.13.2 // indirect - github.com/bytedance/sonic/loader v0.2.4 // indirect + github.com/bytedance/sonic v1.14.0 // indirect + github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cloudwego/base64x v0.1.5 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect @@ -36,12 +37,13 @@ require ( github.com/gin-contrib/sse v1.1.0 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.26.0 // indirect + github.com/go-playground/validator/v10 v10.27.0 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/goccy/go-json v0.10.5 // indirect + github.com/goccy/go-yaml v1.18.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lestrrat-go/blackmagic v1.0.3 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect @@ -54,6 +56,8 @@ require ( github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // 
indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.54.1 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/segmentio/asm v1.2.0 // indirect @@ -64,14 +68,17 @@ require ( github.com/spf13/pflag v1.0.10 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - github.com/ugorji/go/codec v1.2.12 // indirect + github.com/ugorji/go/codec v1.3.0 // indirect + go.uber.org/mock v0.5.0 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/arch v0.17.0 // indirect - golang.org/x/net v0.43.0 // indirect - golang.org/x/sys v0.36.0 // indirect - golang.org/x/text v0.29.0 // indirect - golang.org/x/tools v0.36.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + golang.org/x/arch v0.20.0 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/tools v0.38.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/sda-download/go.sum b/sda-download/go.sum index ad2264e99..ab80b1378 100644 --- a/sda-download/go.sum +++ b/sda-download/go.sum @@ -2,21 +2,21 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 
v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/aws/aws-sdk-go v1.44.256/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= -github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= -github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= -github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= -github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= -github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= +github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= +github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= +github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cevatbarisyilmaz/ara v0.0.4 h1:SGH10hXpBJhhTlObuZzTuFn1rrdmjQImITXnZVPSodc= github.com/cevatbarisyilmaz/ara v0.0.4/go.mod h1:BfFOxnUd6Mj6xmcvRxHN3Sr21Z1T3U2MYkYOmoQe4Ts= -github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= -github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= -github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -39,8 +39,8 @@ github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBv github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= -github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ= -github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk= +github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= @@ -51,12 +51,14 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= -github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= +github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4= +github.com/go-playground/validator/v10 v10.27.0/go.mod 
h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -73,10 +75,8 @@ github.com/johannesboyne/gofakes3 v0.0.0-20240914150247-80d9d7b1b250/go.mod h1:m github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= -github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 
@@ -113,6 +113,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg= +github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8= @@ -153,46 +157,52 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= -github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA= +github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= 
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= 
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/arch v0.17.0 h1:4O3dfLzd+lQewptAHqjewQZQDyEdejz3VwgeYwkZneU= -golang.org/x/arch v0.17.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= +golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= -golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI= golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -204,40 +214,40 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 
h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= -golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190829051458-42f498d34c4d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= -google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc 
v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -249,4 +259,3 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= diff --git a/sda-download/internal/config/config.go b/sda-download/internal/config/config.go index 2e9a7129c..18517aec6 100644 --- a/sda-download/internal/config/config.go +++ b/sda-download/internal/config/config.go @@ -11,6 +11,7 @@ import ( "strings" "time" + "github.com/Masterminds/semver/v3" "github.com/lestrrat-go/jwx/v2/jwk" "github.com/neicnordic/crypt4gh/keys" "github.com/neicnordic/sda-download/internal/storage" @@ -24,7 +25,7 @@ const S3 = "s3" // availableMiddlewares list the options for middlewares // empty string "" is an alias for default, for when the config key is not set, or it's empty -var availableMiddlewares = []string{"", "default"} +var availableMiddlewares = []string{"", "default", "token-clientversion"} // Config is a global configuration value store var Config Map @@ -60,6 +61,12 @@ type AppConfig struct { // Selected middleware for authentication and authorizaton // Optional. 
Default value is "default" for TokenMiddleware Middleware string + + // Minimal version string for the sda-cli client (e.g., "v1.2.3") + // If the client version header does not match this, the request is blocked. + // Optional. Default value is "v0.0.0" + MinimalCliVersion *semver.Version + MinimalCliVersionStr string // This is the original string from the config file } // Stores the Crypt4GH private key used internally @@ -242,6 +249,7 @@ func (c *Map) applyDefaults() { viper.SetDefault("app.host", "0.0.0.0") viper.SetDefault("app.port", 8080) viper.SetDefault("app.middleware", "default") + viper.SetDefault("app.minimalcliversion", "v0.0.0") viper.SetDefault("session.expiration", -1) viper.SetDefault("session.secure", true) viper.SetDefault("session.httponly", true) @@ -371,13 +379,20 @@ func (c *Map) appConfig() error { c.App.ServerKey = viper.GetString("app.serverkey") c.App.Middleware = viper.GetString("app.middleware") + // Validate and parse the configured minimum client version into a SemVer object + c.App.MinimalCliVersionStr = viper.GetString("app.minimalcliversion") + parsedVersion, err := semver.NewVersion(c.App.MinimalCliVersionStr) + if err != nil { + return fmt.Errorf("app.minimalcliversion value='%s' is not a valid semantic version: %v", c.App.MinimalCliVersionStr, err) + } + c.App.MinimalCliVersion = parsedVersion + if c.App.Port != 443 && c.App.Port != 8080 { c.App.Port = viper.GetInt("app.port") } else if c.App.ServerCert != "" && c.App.ServerKey != "" { c.App.Port = 443 } - var err error if viper.GetString("c4gh.transientKeyPath") != "" { if !viper.IsSet("c4gh.transientPassphrase") { return errors.New("c4gh.transientPassphrase is not set") @@ -500,7 +515,7 @@ func GetC4GHKeys() ([32]byte, string, error) { if err != nil { return [32]byte{}, "", fmt.Errorf("error when reading private key: %v", err) } - keyFile.Close() + _ = keyFile.Close() public := keys.DerivePublicKey(private) pem := bytes.Buffer{} diff --git 
a/sda-download/internal/config/config_test.go b/sda-download/internal/config/config_test.go index e656301de..e06308ec0 100644 --- a/sda-download/internal/config/config_test.go +++ b/sda-download/internal/config/config_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/Masterminds/semver/v3" "github.com/lestrrat-go/jwx/v2/jwk" "github.com/neicnordic/crypt4gh/keys" "github.com/spf13/viper" @@ -83,16 +84,26 @@ func (ts *TestSuite) TestAppConfig() { viper.Set("db.sslmode", "disable") viper.Set("app.middleware", "noexist") + viper.Set("app.minimalcliversion", "v0.2.0") c := &Map{} err = c.appConfig() assert.Error(ts.T(), err, "Error expected") viper.Reset() + // Test fail on invalid minimal client version + viper.Set("app.minimalcliversion", "not-a-semver") + c = &Map{} + err = c.appConfig() + assert.Error(ts.T(), err, "Error expected for invalid semver string") + assert.Contains(ts.T(), err.Error(), "'not-a-semver' is not a valid semantic version") + viper.Reset() + viper.Set("app.host", "test") viper.Set("app.port", 1234) viper.Set("app.servercert", "test") viper.Set("app.serverkey", "test") + viper.Set("app.minimalcliversion", "v0.2.0") viper.Set("log.logLevel", "debug") viper.Set("db.sslmode", "disable") viper.Set("c4gh.transientKeyPath", privateKeyFile.Name()) @@ -108,6 +119,11 @@ func (ts *TestSuite) TestAppConfig() { assert.NotEmpty(ts.T(), c.C4GH.PrivateKey) assert.NotEmpty(ts.T(), c.C4GH.PublicKeyB64) + // Assert MinimalCliVersion + parsedVersion, _ := semver.NewVersion("v0.2.0") + assert.NotNil(ts.T(), c.App.MinimalCliVersion, "MinimalCliVersion should be parsed and not nil") + assert.True(ts.T(), parsedVersion.Equal(c.App.MinimalCliVersion), "Parsed version does not match minimal version") + // Check the private key that was loaded by checking the derived public key publicKey, err := base64.StdEncoding.DecodeString(c.C4GH.PublicKeyB64) assert.Nilf(ts.T(), err, "Incorrect public c4gh key generated (error in base64 encoding)") diff --git 
a/sda-download/internal/database/database.go b/sda-download/internal/database/database.go index ec46a393d..8e3999992 100644 --- a/sda-download/internal/database/database.go +++ b/sda-download/internal/database/database.go @@ -24,15 +24,11 @@ type SQLdb struct { ConnInfo string } -// FileInfo is returned by the metadata endpoint +// FileInfo is returned by the metadata/datasets/*dataset/files endpoint type FileInfo struct { FileID string `json:"fileId"` - DatasetID string `json:"datasetId"` DisplayFileName string `json:"displayFileName"` FilePath string `json:"filePath"` - EncryptedFileSize int64 `json:"encryptedFileSize"` - EncryptedFileChecksum string `json:"encryptedFileChecksum"` - EncryptedFileChecksumType string `json:"encryptedFileChecksumType"` DecryptedFileSize int64 `json:"decryptedFileSize"` DecryptedFileChecksum string `json:"decryptedFileChecksum"` DecryptedFileChecksumType string `json:"decryptedFileChecksumType"` @@ -118,7 +114,7 @@ func (dbs *SQLdb) checkAndReconnectIfNeeded() { for dbs.DB.Ping() != nil { log.Errorln("Database unreachable, reconnecting") - dbs.DB.Close() + _ = dbs.DB.Close() if time.Since(start) > dbReconnectTimeout { logFatalf("Could not reconnect to failed database in reasonable time, giving up") @@ -151,9 +147,11 @@ var GetFiles = func(datasetID string) ([]*FileInfo, error) { return r, err } +// removeUserIDPrefix strips the user id prefix from a file path func removeUserIDPrefix(filePath, userID string) string { + sanitizedUserID := strings.ReplaceAll(userID, "@", "_") // Construct the full prefix we expect to find (userID + "/"). 
- fullPrefix := userID + "/" + fullPrefix := sanitizedUserID + "/" if strings.HasPrefix(filePath, fullPrefix) { return strings.TrimPrefix(filePath, fullPrefix) } @@ -161,15 +159,6 @@ func removeUserIDPrefix(filePath, userID string) string { return filePath } -// processFileInfo removes any sensitive information from the file info -func processFileInfo(fi *FileInfo, userID string) error { - // Remove userids from file paths - userID = strings.ReplaceAll(userID, "@", "_") // in filePath, @ is replaced with _ - fi.FilePath = removeUserIDPrefix(fi.FilePath, userID) - - return nil -} - // getFiles is the actual function performing work for GetFile func (dbs *SQLdb) getFiles(datasetID string) ([]*FileInfo, error) { dbs.checkAndReconnectIfNeeded() @@ -178,25 +167,18 @@ func (dbs *SQLdb) getFiles(datasetID string) ([]*FileInfo, error) { db := dbs.DB const query = ` - SELECT files.stable_id AS id, - datasets.stable_id AS dataset_id, - reverse(split_part(reverse(files.submission_file_path::text), '/'::text, 1)) AS display_file_name, - files.submission_user AS user_id, - files.submission_file_path AS file_path, - files.archive_file_size AS file_size, - lef.archive_file_checksum AS encrypted_file_checksum, - lef.archive_file_checksum_type AS encrypted_file_checksum_type, - files.decrypted_file_size, - sha.checksum AS decrypted_file_checksum, - sha.type AS decrypted_file_checksum_type - FROM sda.files - JOIN sda.file_dataset ON file_id = files.id - JOIN sda.datasets ON file_dataset.dataset_id = datasets.id - LEFT JOIN local_ega.files lef ON files.stable_id = lef.stable_id - LEFT JOIN (SELECT file_id, (ARRAY_AGG(event ORDER BY started_at DESC))[1] AS event FROM sda.file_event_log GROUP BY file_id) log ON files.id = log.file_id - LEFT JOIN (SELECT file_id, checksum, type FROM sda.checksums WHERE source = 'UNENCRYPTED') sha ON files.id = sha.file_id - WHERE datasets.stable_id = $1; - ` +SELECT files.stable_id AS id, + reverse(split_part(reverse(files.submission_file_path::text), 
'/'::text, 1)) AS display_file_name, + files.submission_user AS user_id, + files.submission_file_path AS file_path, + files.decrypted_file_size, + sha_unenc.checksum AS decrypted_file_checksum, + sha_unenc.type AS decrypted_file_checksum_type +FROM sda.files + JOIN sda.file_dataset file_dataset ON file_dataset.file_id = files.id + JOIN sda.datasets datasets ON file_dataset.dataset_id = datasets.id + LEFT JOIN sda.checksums sha_unenc ON files.id = sha_unenc.file_id AND sha_unenc.source = 'UNENCRYPTED' +WHERE datasets.stable_id = $1;` // nolint:rowserrcheck rows, err := db.Query(query, datasetID) @@ -213,9 +195,8 @@ func (dbs *SQLdb) getFiles(datasetID string) ([]*FileInfo, error) { for rows.Next() { // Read rows into struct fi := &FileInfo{} - err := rows.Scan(&fi.FileID, &fi.DatasetID, &fi.DisplayFileName, + err := rows.Scan(&fi.FileID, &fi.DisplayFileName, &userID, &fi.FilePath, - &fi.EncryptedFileSize, &fi.EncryptedFileChecksum, &fi.EncryptedFileChecksumType, &fi.DecryptedFileSize, &fi.DecryptedFileChecksum, &fi.DecryptedFileChecksumType) if err != nil { log.Error(err) @@ -223,23 +204,8 @@ func (dbs *SQLdb) getFiles(datasetID string) ([]*FileInfo, error) { return nil, err } - // NOTE FOR ENCRYPTED DOWNLOAD - // As of now, encrypted download is not supported. When implementing encrypted download, note that - // local_ega_ebi.file:file_size is the size of the file body in the archive without the header, - // so the user needs to know the size of the header when downloading in encrypted format. - // A way to get this could be: - // fd := GetFile() - // fi.EncryptedFileSize = fi.EncryptedFileSize + len(fd.Header) - // But if the header is re-encrypted or a completely new header is generated, the length - // needs to be conveyd to the user in some other way. - // Process file info so that we don't leak any unneccessary info. 
- err = processFileInfo(fi, userID) - if err != nil { - log.Error(err) - - return nil, err - } + fi.FilePath = removeUserIDPrefix(fi.FilePath, userID) // Add structs to array files = append(files, fi) @@ -322,17 +288,17 @@ func (dbs *SQLdb) getDatasetInfo(datasetID string) (*DatasetInfo, error) { return dataset, nil } -// GetDatasetFileInfo returns information on a file given a dataset ID and an +// GetDatasetFileStableID returns the stable id of a file given a dataset ID and an // upload file path -var GetDatasetFileInfo = func(datasetID, filePath string) (*FileInfo, error) { +var GetDatasetFileStableID = func(datasetID, filePath string) (string, error) { var ( - d *FileInfo - err error - count int + fileStableID string + err error + count int ) for count < dbRetryTimes { - d, err = DB.getDatasetFileInfo(datasetID, filePath) + fileStableID, err = DB.getDatasetFileStableID(datasetID, filePath) if err != nil { count++ @@ -342,68 +308,36 @@ var GetDatasetFileInfo = func(datasetID, filePath string) (*FileInfo, error) { break } - return d, err + return fileStableID, err } // getDatasetFileInfo is the actual function performing work for GetFile -func (dbs *SQLdb) getDatasetFileInfo(datasetID, filePath string) (*FileInfo, error) { +func (dbs *SQLdb) getDatasetFileStableID(datasetID, filePath string) (string, error) { dbs.checkAndReconnectIfNeeded() - file := &FileInfo{} db := dbs.DB const query = ` - SELECT f.stable_id AS file_id, - d.stable_id AS dataset_id, - reverse(split_part(reverse(f.submission_file_path::text), '/'::text, 1)) AS display_file_name, - f.submission_user AS user_id, - f.submission_file_path AS file_path, - f.archive_file_size AS file_size, - lef.archive_file_checksum AS encrypted_file_checksum, - lef.archive_file_checksum_type AS encrypted_file_checksum_type, - f.decrypted_file_size, - dc.checksum AS decrypted_file_checksum, - dc.type AS decrypted_file_checksum_type - FROM sda.files f - JOIN sda.file_dataset fd ON fd.file_id = f.id - JOIN sda.datasets 
d ON fd.dataset_id = d.id - LEFT JOIN local_ega.files lef ON f.stable_id = lef.stable_id - LEFT JOIN (SELECT file_id, - (ARRAY_AGG(event ORDER BY started_at DESC))[1] AS event - FROM sda.file_event_log - GROUP BY file_id) e - ON f.id = e.file_id - LEFT JOIN (SELECT file_id, checksum, type - FROM sda.checksums - WHERE source = 'UNENCRYPTED') dc - ON f.id = dc.file_id - WHERE d.stable_id = $1 AND f.submission_file_path ~ ('^[^/]*/?' || $2);` +SELECT files.stable_id +FROM sda.files + JOIN sda.file_dataset file_dataset ON file_dataset.file_id = files.id + JOIN sda.datasets datasets ON file_dataset.dataset_id = datasets.id + WHERE datasets.stable_id = $1 AND files.submission_file_path ~ ('^[^/]*/?' || $2);` // regexp matching in the submission file path in order to disregard the // first slash-separated path element. The first path element is the id of // the uploading user which should not be displayed. - var userID string + var fileStableID string // nolint:rowserrcheck - err := db.QueryRow(query, datasetID, filePath).Scan(&file.FileID, - &file.DatasetID, &file.DisplayFileName, &userID, &file.FilePath, - &file.EncryptedFileSize, &file.EncryptedFileChecksum, &file.EncryptedFileChecksumType, - &file.DecryptedFileSize, &file.DecryptedFileChecksum, &file.DecryptedFileChecksumType) - - if err != nil { - log.Error(err) - - return nil, err - } + err := db.QueryRow(query, datasetID, filePath).Scan(&fileStableID) - // Process file info so that we don't leak any unneccessary info. 
- err = processFileInfo(file, userID) if err != nil { log.Error(err) - return nil, err + return "", err } - return file, nil + return fileStableID, nil } // CheckFilePermission checks if user has permissions to access the dataset the file is a part of @@ -527,5 +461,5 @@ func (dbs *SQLdb) getFile(fileID string) (*FileDownload, error) { // Close terminates the connection to the database func (dbs *SQLdb) Close() { db := dbs.DB - db.Close() + _ = db.Close() } diff --git a/sda-download/internal/database/database_test.go b/sda-download/internal/database/database_test.go index ca8d60813..501a878fc 100644 --- a/sda-download/internal/database/database_test.go +++ b/sda-download/internal/database/database_test.go @@ -273,75 +273,6 @@ func TestGetDatasetInfo(t *testing.T) { log.SetOutput(os.Stdout) } -func TestGetDatasetFileInfo(t *testing.T) { - r := sqlTesterHelper(t, func(mock sqlmock.Sqlmock, testDb *SQLdb) error { - expected := &FileInfo{ - FileID: "file1", - DatasetID: "dataset1", - DisplayFileName: "file.txt", - FilePath: "dir/file.txt", - EncryptedFileSize: 60, - EncryptedFileChecksum: "hash", - EncryptedFileChecksumType: "sha256", - DecryptedFileSize: 32, - DecryptedFileChecksum: "hash", - DecryptedFileChecksumType: "sha256", - } - userID := "user1" - - query := ` - SELECT f.stable_id AS file_id, - d.stable_id AS dataset_id, - reverse\(split_part\(reverse\(f.submission_file_path::text\), '/'::text, 1\)\) AS display_file_name, - f.submission_user AS user_id, - f.submission_file_path AS file_path, - f.archive_file_size AS file_size, - lef.archive_file_checksum AS encrypted_file_checksum, - lef.archive_file_checksum_type AS encrypted_file_checksum_type, - f.decrypted_file_size, - dc.checksum AS decrypted_file_checksum, - dc.type AS decrypted_file_checksum_type - FROM sda.files f - JOIN sda.file_dataset fd ON fd.file_id = f.id - JOIN sda.datasets d ON fd.dataset_id = d.id - LEFT JOIN local_ega.files lef ON f.stable_id = lef.stable_id - LEFT JOIN \(SELECT file_id, - 
\(ARRAY_AGG\(event ORDER BY started_at DESC\)\)\[1\] AS event - FROM sda.file_event_log - GROUP BY file_id\) e - ON f.id = e.file_id - LEFT JOIN \(SELECT file_id, checksum, type - FROM sda.checksums - WHERE source = 'UNENCRYPTED'\) dc - ON f.id = dc.file_id - WHERE d.stable_id = \$1 AND f.submission_file_path ~ \('\^\[\^\/\]\*/\?' \|\| \$2\);` - mock.ExpectQuery(query). - WithArgs("dataset1", "file1"). - WillReturnRows(sqlmock.NewRows([]string{"file_id", "dataset_id", - "display_file_name", "user_id", "file_path", "file_size", - "encrypted_file_checksum", "encrypted_file_checksum_type", "decrypted_file_size", "decrypted_file_checksum", - "decrypted_file_checksum_type"}).AddRow(expected.FileID, expected.DatasetID, - expected.DisplayFileName, userID, expected.FilePath, - expected.EncryptedFileSize, expected.EncryptedFileChecksum, expected.EncryptedFileChecksumType, expected.DecryptedFileSize, - expected.DecryptedFileChecksum, expected.DecryptedFileChecksumType)) - - x, err := testDb.getDatasetFileInfo("dataset1", "file1") - - assert.Equal(t, expected, x, "did not get expected file values") - - return err - }) - - assert.Nil(t, r, "checkDataset failed unexpectedly") - - var buf bytes.Buffer - log.SetOutput(&buf) - - buf.Reset() - - log.SetOutput(os.Stdout) -} - func TestGetFile(t *testing.T) { r := sqlTesterHelper(t, func(mock sqlmock.Sqlmock, testDb *SQLdb) error { expected := &FileDownload{ @@ -388,66 +319,3 @@ func TestGetFile(t *testing.T) { log.SetOutput(os.Stdout) } - -func TestGetFiles(t *testing.T) { - r := sqlTesterHelper(t, func(mock sqlmock.Sqlmock, testDb *SQLdb) error { - expected := []*FileInfo{} - fileInfo := &FileInfo{ - FileID: "file1", - DatasetID: "dataset1", - DisplayFileName: "file.txt", - FilePath: "dir/file.txt", - EncryptedFileSize: 60, - EncryptedFileChecksum: "hash", - EncryptedFileChecksumType: "sha256", - DecryptedFileSize: 32, - DecryptedFileChecksum: "hash", - DecryptedFileChecksumType: "sha256", - } - userID := "user1" - - expected = 
append(expected, fileInfo) - query := ` - SELECT files.stable_id AS id, - datasets.stable_id AS dataset_id, - reverse\(split_part\(reverse\(files.submission_file_path::text\), '/'::text, 1\)\) AS display_file_name, - files.submission_user AS user_id, - files.submission_file_path AS file_path, - files.archive_file_size AS file_size, - lef.archive_file_checksum AS encrypted_file_checksum, - lef.archive_file_checksum_type AS encrypted_file_checksum_type, - files.decrypted_file_size, - sha.checksum AS decrypted_file_checksum, - sha.type AS decrypted_file_checksum_type - FROM sda.files - JOIN sda.file_dataset ON file_id = files.id - JOIN sda.datasets ON file_dataset.dataset_id = datasets.id - LEFT JOIN local_ega.files lef ON files.stable_id = lef.stable_id - LEFT JOIN \(SELECT file_id, \(ARRAY_AGG\(event ORDER BY started_at DESC\)\)\[1\] AS event FROM sda.file_event_log GROUP BY file_id\) log ON files.id = log.file_id - LEFT JOIN \(SELECT file_id, checksum, type FROM sda.checksums WHERE source = 'UNENCRYPTED'\) sha ON files.id = sha.file_id - WHERE datasets.stable_id = \$1; - ` - mock.ExpectQuery(query). - WithArgs("dataset1"). 
- WillReturnRows(sqlmock.NewRows([]string{"file_id", "dataset_id", - "display_file_name", "user_id", "file_path", "file_size", "encrypted_file_checksum", - "encrypted_file_checksum_type", "decrypted_file_size", "decrypted_file_checksum", "decrypted_file_checksum_type"}).AddRow(fileInfo.FileID, fileInfo.DatasetID, - fileInfo.DisplayFileName, userID, fileInfo.FilePath, - fileInfo.EncryptedFileSize, fileInfo.EncryptedFileChecksum, fileInfo.EncryptedFileChecksumType, fileInfo.DecryptedFileSize, - fileInfo.DecryptedFileChecksum, fileInfo.DecryptedFileChecksumType)) - - x, err := testDb.getFiles("dataset1") - assert.Equal(t, expected, x, "did not get expected file details") - - return err - }) - - assert.Nil(t, r, "getFiles failed unexpectedly") - - var buf bytes.Buffer - log.SetOutput(&buf) - - buf.Reset() - - log.SetOutput(os.Stdout) -} diff --git a/sda-download/internal/storage/seekable_test.go b/sda-download/internal/storage/seekable_test.go index 7e9211a27..83598a53e 100644 --- a/sda-download/internal/storage/seekable_test.go +++ b/sda-download/internal/storage/seekable_test.go @@ -47,7 +47,7 @@ func TestSeekableBackend(t *testing.T) { assert.Equal(t, len(writeData), written, "Did not write all writeData") } - writer.Close() + _ = writer.Close() reader, err := backend.NewFileReadSeeker(path) assert.Nil(t, err, "s3 NewFileReadSeeker failed when it should work") @@ -182,7 +182,7 @@ func TestS3SeekablePrefetchSize(t *testing.T) { assert.NotNil(t, writer, "Got a nil reader for writer from s3") assert.Nil(t, err, "posix NewFileWriter failed when it shouldn't") - writer.Close() + _ = writer.Close() reader, err := backend.NewFileReadSeeker(path) assert.Nil(t, err, "s3 NewFileReadSeeker failed when it should work") @@ -224,7 +224,7 @@ func TestS3SeekableSpecial(t *testing.T) { assert.Equal(t, len(writeData), written, "Did not write all writeData") } - writer.Close() + _ = writer.Close() reader, err := backend.NewFileReadSeeker(path) reader.(*s3Reader).seeked = true diff 
--git a/sda-download/internal/storage/storage_test.go b/sda-download/internal/storage/storage_test.go index 6bd0921a3..70352eb61 100644 --- a/sda-download/internal/storage/storage_test.go +++ b/sda-download/internal/storage/storage_test.go @@ -74,7 +74,7 @@ func writeName() (name string, err error) { func doCleanup() { for _, name := range cleanupFiles { - os.Remove(name) + _ = os.Remove(name) } cleanupFiles = cleanupFilesBack[0:0] @@ -138,7 +138,7 @@ func TestPosixBackend(t *testing.T) { assert.Nil(t, err, "Failure when writing to posix writer") assert.Equal(t, len(writeData), written, "Did not write all writeData") - writer.Close() + _ = writer.Close() log.SetOutput(&buf) writer, err = backend.NewFileWriter(posixNotCreatable) @@ -309,7 +309,7 @@ func TestS3Backend(t *testing.T) { assert.Nil(t, err, "Failure when writing to s3 writer") assert.Equal(t, len(writeData), written, "Did not write all writeData") - writer.Close() + _ = writer.Close() reader, err := s3back.NewFileReader(s3Creatable) assert.Nil(t, err, "s3 NewFileReader failed when it should work") diff --git a/sda-sftp-inbox/pom.xml b/sda-sftp-inbox/pom.xml index 37324fd37..114218151 100644 --- a/sda-sftp-inbox/pom.xml +++ b/sda-sftp-inbox/pom.xml @@ -14,7 +14,7 @@ org.springframework.boot spring-boot-starter-parent - 3.5.5 + 3.5.7 @@ -29,7 +29,7 @@ org.junit junit-bom - 5.13.4 + 6.0.1 pom import @@ -103,27 +103,27 @@ commons-io commons-io - 2.20.0 + 2.21.0 com.amazonaws aws-java-sdk-s3 - 1.12.791 + 1.12.793 com.google.guava guava - 33.4.8-jre + 33.5.0-jre net.logstash.logback logstash-logback-encoder - 8.1 + 9.0 org.bouncycastle bcprov-jdk18on - 1.81 + 1.82 @@ -184,12 +184,12 @@ maven-surefire-plugin - 3.5.3 + 3.5.4 org.apache.maven.plugins maven-failsafe-plugin - 3.5.3 + 3.5.4 diff --git a/sda/cmd/api/api.go b/sda/cmd/api/api.go index b0bfe2255..df629efa5 100644 --- a/sda/cmd/api/api.go +++ b/sda/cmd/api/api.go @@ -317,40 +317,76 @@ func getFiles(c *gin.Context) { c.JSON(200, files) } +/* +ingestFile 
handles requests to initiate ingestion of a file. +This endpoint supports two input modes: +1. By file ID (via the "fileid" query parameter): Looks up the user and file path from the database. +2. By JSON payload: Expects a JSON body with user and file path. +The function constructs an ingest message, validates it +and sends it to the broker with the appropriate file ID. +*/ func ingestFile(c *gin.Context) { - var ingest schema.IngestionTrigger - if err := c.BindJSON(&ingest); err != nil { - c.AbortWithStatusJSON( - http.StatusBadRequest, - gin.H{ - "error": "json decoding : " + err.Error(), - "status": http.StatusBadRequest, - }, - ) + var ( + ingest schema.IngestionTrigger + fileID string + ) + switch { + case c.Query("fileid") != "" && c.Request.ContentLength > 0: + c.AbortWithStatusJSON(http.StatusBadRequest, "both file ID parameter and payload provided.") return - } + case c.Query("fileid") != "": + // Get the user and the inbox filepath + fileDetails, err := Conf.API.DB.GetFileDetailsFromUUID(c.Query("fileid"), "uploaded") + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, "file information not found") - ingest.Type = "ingest" - marshaledMsg, _ := json.Marshal(&ingest) - if err := schema.ValidateJSON(fmt.Sprintf("%s/ingestion-trigger.json", Conf.Broker.SchemasPath), marshaledMsg); err != nil { - c.AbortWithStatusJSON(http.StatusBadRequest, err.Error()) + return + } + // Add file info in the message payload + ingest.User = fileDetails.User + ingest.FilePath = fileDetails.Path + fileID = c.Query("fileid") + + case c.Request.ContentLength > 0: + // Bind ingest and payload + if err = c.BindJSON(&ingest); err != nil { + c.AbortWithStatusJSON( + http.StatusBadRequest, + gin.H{ + "error": "json decoding : " + err.Error(), + "status": http.StatusBadRequest, + }, + ) + + return + } + fileID, err = Conf.API.DB.GetFileIDByUserPathAndStatus(ingest.User, ingest.FilePath, "uploaded") + if err != nil { + if fileID == "" { + 
c.AbortWithStatusJSON(http.StatusBadRequest, err.Error()) + } else { + c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) + } + + return + } + default: + c.AbortWithStatusJSON(http.StatusBadRequest, "missing parameter or payload") return } + // Add type in message payload + ingest.Type = "ingest" - corrID, err := Conf.API.DB.GetCorrID(ingest.User, ingest.FilePath, "") - if err != nil { - if corrID == "" { - c.AbortWithStatusJSON(http.StatusBadRequest, err.Error()) - } else { - c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) - } + marshaledMsg, _ := json.Marshal(&ingest) + if err := schema.ValidateJSON(fmt.Sprintf("%s/ingestion-trigger.json", Conf.Broker.SchemasPath), marshaledMsg); err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, err.Error()) return } - err = Conf.API.MQ.SendMessage(corrID, Conf.Broker.Exchange, "ingest", marshaledMsg) + err = Conf.API.MQ.SendMessage(fileID, Conf.Broker.Exchange, "ingest", marshaledMsg) if err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) @@ -407,7 +443,7 @@ func deleteFile(c *gin.Context) { time.Sleep(time.Duration(math.Pow(2, float64(count))) * time.Second) } - if err := Conf.API.DB.UpdateFileEventLog(fileID, "disabled", fileID, "api", "{}", "{}"); err != nil { + if err := Conf.API.DB.UpdateFileEventLog(fileID, "disabled", "api", "{}", "{}"); err != nil { log.Errorf("set status deleted failed, reason: (%v)", err) c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) @@ -534,41 +570,94 @@ func downloadFile(c *gin.Context) { c.Status(http.StatusOK) } +/* +setAccession handles requests to assign an accession ID to a file. +This endpoint supports two input modes: +1. By query parameters ("fileid" and "accessionid"): Retrieves user, file path, and decrypted checksum from the database using the file ID. +2. By JSON payload: Expects a JSON body with user and file path, then looks up the file ID and decrypted checksum. 
+If both query parameters and a JSON payload are provided, the request is rejected with a 400 Bad Request. +The function constructs an accession message, validates it and sends it to the message broker. +*/ func setAccession(c *gin.Context) { - var accession schema.IngestionAccession - if err := c.BindJSON(&accession); err != nil { - c.AbortWithStatusJSON( - http.StatusBadRequest, - gin.H{ - "error": "json decoding : " + err.Error(), - "status": http.StatusBadRequest, - }, - ) + var ( + accession schema.IngestionAccession + fileID string + ) + hasQuery := c.Query("fileid") != "" || c.Query("accessionid") != "" + missingAccession := c.Query("fileid") != "" && c.Query("accessionid") == "" + hasBody := c.Request.ContentLength > 0 + switch { + case hasQuery && hasBody: + c.AbortWithStatusJSON(http.StatusBadRequest, "both parameters and json payload provided. Choose one") return - } + case missingAccession: + c.AbortWithStatusJSON(http.StatusBadRequest, "accessionid is not provided") - corrID, err := Conf.API.DB.GetCorrID(accession.User, accession.FilePath, "") - if err != nil { - if corrID == "" { - c.AbortWithStatusJSON(http.StatusBadRequest, err.Error()) - } else { - c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) + return + case c.Query("fileid") != "" && c.Query("accessionid") != "": + // Get the user and the inbox filepath + fileDetails, err := Conf.API.DB.GetFileDetailsFromUUID(c.Query("fileid"), "verified") + if err != nil { + c.AbortWithStatusJSON(http.StatusBadRequest, "file details not found") + + return } + // Get the decrypted checksum + fileDecrChecksum, err := Conf.API.DB.GetDecryptedChecksum(c.Query("fileid")) + if err != nil { + log.Debugln(err.Error()) + c.AbortWithStatusJSON(http.StatusInternalServerError, "required data missing") - return - } + return + } + // Add info in message payload + accession.AccessionID = c.Query("accessionid") + accession.User = fileDetails.User + accession.FilePath = fileDetails.Path + 
accession.DecryptedChecksums = []schema.Checksums{{Type: "sha256", Value: fileDecrChecksum}} + fileID = c.Query("fileid") + + case c.Request.ContentLength > 0: + if err = c.BindJSON(&accession); err != nil { + c.AbortWithStatusJSON( + http.StatusBadRequest, + gin.H{ + "error": "json decoding : " + err.Error(), + "status": http.StatusBadRequest, + }, + ) - fileInfo, err := Conf.API.DB.GetFileInfo(corrID) - if err != nil { - log.Debugln(err.Error()) - c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) + return + } + fileID, err = Conf.API.DB.GetFileIDByUserPathAndStatus(accession.User, accession.FilePath, "verified") + if err != nil { + if fileID == "" { + c.AbortWithStatusJSON(http.StatusBadRequest, err.Error()) + } else { + c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) + } + + return + } + // Get decrypted checksum + fileDecrChecksum, err := Conf.API.DB.GetDecryptedChecksum(fileID) + if err != nil { + log.Debugln(err.Error()) + c.AbortWithStatusJSON(http.StatusNotFound, "decrypted checksum not found") + + return + } + // Add decrypted checksum in message payload + accession.DecryptedChecksums = []schema.Checksums{{Type: "sha256", Value: fileDecrChecksum}} + default: + c.AbortWithStatusJSON(http.StatusBadRequest, "missing parameter or payload") return } - - accession.DecryptedChecksums = []schema.Checksums{{Type: "sha256", Value: fileInfo.DecryptedChecksum}} + // Add type in the message payload accession.Type = "accession" + marshaledMsg, _ := json.Marshal(&accession) if err := schema.ValidateJSON(fmt.Sprintf("%s/ingestion-accession.json", Conf.Broker.SchemasPath), marshaledMsg); err != nil { log.Debugln(err.Error()) @@ -577,7 +666,7 @@ func setAccession(c *gin.Context) { return } - err = Conf.API.MQ.SendMessage(corrID, Conf.Broker.Exchange, "accession", marshaledMsg) + err = Conf.API.MQ.SendMessage(fileID, Conf.Broker.Exchange, "accession", marshaledMsg) if err != nil { log.Debugln(err.Error()) 
c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) @@ -608,36 +697,19 @@ func createDataset(c *gin.Context) { return } + // Check that the files the accession ids are linked to belong to the user of the dataset for _, stableID := range dataset.AccessionIDs { - inboxPath, err := Conf.API.DB.GetInboxPath(stableID) + belongsToUser, err := Conf.API.DB.CheckStableIDOwnedByUser(stableID, dataset.User) if err != nil { - switch { - case err.Error() == "sql: no rows in result set": - log.Errorln(err.Error()) - c.AbortWithStatusJSON(http.StatusBadRequest, fmt.Sprintf("accession ID not found: %s", stableID)) - - return - default: - log.Errorln(err.Error()) - c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) + log.Errorln(err.Error()) + c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) - return - } + return } - _, err = Conf.API.DB.GetCorrID(dataset.User, inboxPath, stableID) - if err != nil { - switch { - case err.Error() == "sql: no rows in result set": - log.Errorln(err.Error()) - c.AbortWithStatusJSON(http.StatusBadRequest, "accession ID owned by other user") - - return - default: - log.Errorln(err.Error()) - c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) + if !belongsToUser { + c.AbortWithStatusJSON(http.StatusBadRequest, fmt.Sprintf("accession ID: %s not found or owned by other user", stableID)) - return - } + return } } @@ -907,13 +979,6 @@ func reVerify(c *gin.Context, accessionID string) (*gin.Context, error) { return c, err } - corrID, err := Conf.API.DB.GetCorrID(reVerify.User, reVerify.FilePath, accessionID) - if err != nil { - log.Errorf("failed to get CorrID for %s, %s", reVerify.User, reVerify.FilePath) - c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) - - return c, err - } marshaledMsg, _ := json.Marshal(&reVerify) if err := schema.ValidateJSON(fmt.Sprintf("%s/ingestion-verification.json", Conf.Broker.SchemasPath), marshaledMsg); err != nil { @@ -923,7 +988,7 @@ func 
reVerify(c *gin.Context, accessionID string) (*gin.Context, error) { return c, err } - err = Conf.API.MQ.SendMessage(corrID, Conf.Broker.Exchange, "archived", marshaledMsg) + err = Conf.API.MQ.SendMessage(reVerify.FileID, Conf.Broker.Exchange, "archived", marshaledMsg) if err != nil { c.AbortWithStatusJSON(http.StatusInternalServerError, err.Error()) diff --git a/sda/cmd/api/api.md b/sda/cmd/api/api.md index 1a124c813..ec5729f01 100644 --- a/sda/cmd/api/api.md +++ b/sda/cmd/api/api.md @@ -49,37 +49,57 @@ Endpoints: Admin endpoints are only available to a set of whitelisted users specified in the application config. - `/file/ingest` - - accepts `POST` requests with JSON data with the format: `{"filepath": "", "user": ""}` + - accepts `POST` requests with either: + - A JSON payload: `{"filepath": "", "user": ""}` + - OR a `fileid` query parameter: `/file/ingest?fileid=` - triggers the ingestion of the file. + - If both a JSON payload and a `fileid` query parameter are provided in the same request, a `400 Bad Request` is returned. + - Error codes - - `200` Query execute ok. - - `400` Error due to bad payload i.e. wrong `user` + `filepath` combination. + - `200` Query executed successfully. + - `400` Bad request (e.g. wrong `user` + `filepath` combination, both payload and fileid provided, invalid fileid, or invalid JSON). - `401` Token user is not in the list of admins. - `500` Internal error due to DB or MQ failures. 
- Example: + Example (JSON payload): ```bash curl -H "Authorization: Bearer $token" -H "Content-Type: application/json" -X POST -d '{"filepath": "/uploads/file.c4gh", "user": "testuser"}' https://HOSTNAME/file/ingest ``` + Example (fileid query parameter): + + ```bash + curl -H "Authorization: Bearer $token" -X POST "https://HOSTNAME/file/ingest?fileid=" + - `/file/accession` - - accepts `POST` requests with JSON data with the format: `{"accession_id": "", "filepath": "", "user": ""}` + - accepts `POST` requests with either: + - A JSON payload: `{"accession_id": "", "filepath": "", "user": ""}` + - OR query parameters: `/file/accession?fileid=&accessionid=` - assigns accession ID to the file. + - If both a JSON payload and query parameters are provided in the same request, a `400 Bad Request` is returned. + - Error codes - - `200` Query execute ok. - - `400` Error due to bad payload i.e. wrong `user` + `filepath` combination. + - `200` Query executed successfully. + - `400` Bad request (e.g. wrong `user` + `filepath` combination, both payload and parameters provided, invalid fileid, or invalid JSON). - `401` Token user is not in the list of admins. + - `404` Decrypted checksum not found. - `500` Internal error due to DB or MQ failures. - Example: + Example (JSON payload): ```bash curl -H "Authorization: Bearer $token" -H "Content-Type: application/json" -X POST -d '{"accession_id": "my-id-01", "filepath": "/uploads/file.c4gh", "user": "testuser"}' https://HOSTNAME/file/accession ``` + Example (query parameters): + + ```bash + curl -H "Authorization: Bearer $token" -X POST "https://HOSTNAME/file/accession?fileid=&accessionid=" + ``` + - `/file/verify/:accession` - accepts `PUT` requests with an accession ID as the last element in the query - triggers re-verification of the file with the specific accession ID.
diff --git a/sda/cmd/api/api_test.go b/sda/cmd/api/api_test.go index 85f57fb79..e7d3e9e29 100644 --- a/sda/cmd/api/api_test.go +++ b/sda/cmd/api/api_test.go @@ -9,6 +9,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "hash" "io" "net" "net/http" @@ -25,7 +26,6 @@ import ( "github.com/casbin/casbin/v2" "github.com/casbin/casbin/v2/model" "github.com/gin-gonic/gin" - "github.com/google/uuid" _ "github.com/lib/pq" "github.com/neicnordic/crypt4gh/keys" "github.com/neicnordic/crypt4gh/streaming" @@ -168,7 +168,7 @@ func TestMain(m *testing.M) { if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() return nil }); err != nil { @@ -229,7 +229,7 @@ func TestMain(m *testing.M) { if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() return nil }); err != nil { @@ -297,6 +297,37 @@ type TestSuite struct { GrpcListener GrpcListener } +func helperCreateVerifiedTestFile(s *TestSuite, user, filePath string) (string, hash.Hash) { + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) + assert.NoError(s.T(), err, "failed to register file in database") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") + assert.NoError(s.T(), err, "failed to update status of file in database") + + encSha := sha256.New() + _, err = encSha.Write([]byte("Checksum")) + assert.NoError(s.T(), err) + + decSha := sha256.New() + _, err = decSha.Write([]byte("DecryptedChecksum")) + assert.NoError(s.T(), err) + + fileInfo := database.FileInfo{ + UploadedChecksum: fmt.Sprintf("%x", encSha.Sum(nil)), + Size: 1000, + Path: filePath, + DecryptedChecksum: fmt.Sprintf("%x", decSha.Sum(nil)), + DecryptedSize: 948, + } + err = Conf.API.DB.SetArchived(fileInfo, fileID) + assert.NoError(s.T(), err, "failed to mark file as Archived") + err = Conf.API.DB.SetVerified(fileInfo, fileID) + assert.NoError(s.T(), err, "failed to mark file as Verified") + err = Conf.API.DB.UpdateFileEventLog(fileID, "verified", user, "{}", "{}") + assert.NoError(s.T(), err, 
"failed to update status of file in database") + + return fileID, decSha +} + func (s *TestSuite) TestShutdown() { Conf = &config.Config{} Conf.Broker = broker.MQConf{ @@ -545,7 +576,7 @@ func (s *TestSuite) TearDownSuite() { s.GrpcListener.gs.GracefulStop() } if s.GrpcListener.Listener != nil { - s.GrpcListener.Listener.Close() + _ = s.GrpcListener.Listener.Close() } } func (s *TestSuite) SetupTest() { @@ -583,7 +614,7 @@ func (s *TestSuite) SetupTest() { req.SetBasicAuth("guest", "guest") res, err := client.Do(req) assert.NoError(s.T(), err, "failed to query broker") - res.Body.Close() + _ = res.Body.Close() } } @@ -638,12 +669,11 @@ func (s *TestSuite) TestAPIGetFiles() { // Insert a file and make sure it is listed file1 := fmt.Sprintf("/%v/TestAPIGetFiles.c4gh", s.User) - fileID, err := Conf.API.DB.RegisterFile(file1, s.User) + fileID, err := Conf.API.DB.RegisterFile(nil, file1, s.User) assert.NoError(s.T(), err, "failed to register file in database") - corrID := uuid.New().String() latestStatus := "uploaded" - err = Conf.API.DB.UpdateFileEventLog(fileID, latestStatus, corrID, s.User, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, latestStatus, s.User, "{}", "{}") assert.NoError(s.T(), err, "got (%v) when trying to update file status") resp, err = client.Do(req) @@ -663,7 +693,7 @@ func (s *TestSuite) TestAPIGetFiles() { s.FailNowf("got (%s) when setting stable ID: %s, %s", err.Error(), "stableID", fileID) } latestStatus = "ready" - err = Conf.API.DB.UpdateFileEventLog(fileID, latestStatus, corrID, s.User, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, latestStatus, s.User, "{}", "{}") assert.NoError(s.T(), err, "got (%v) when trying to update file status") resp, err = client.Do(req) @@ -683,7 +713,7 @@ func (s *TestSuite) TestAPIGetFiles() { // Insert a second file and make sure it is listed file2 := fmt.Sprintf("/%v/TestAPIGetFiles2.c4gh", s.User) - _, err = Conf.API.DB.RegisterFile(file2, s.User) + _, err = 
Conf.API.DB.RegisterFile(nil, file2, s.User) assert.NoError(s.T(), err, "failed to register file in database") resp, err = client.Do(req) @@ -717,12 +747,12 @@ func (s *TestSuite) TestAPIGetFiles_filteredSelection() { sub = "submission_b" } - fileID, err := Conf.API.DB.RegisterFile(fmt.Sprintf("%s/TestGetUserFiles-00%d.c4gh", sub, i), strings.ReplaceAll(user, "_", "@")) + fileID, err := Conf.API.DB.RegisterFile(nil, fmt.Sprintf("%s/TestGetUserFiles-00%d.c4gh", sub, i), strings.ReplaceAll(user, "_", "@")) if err != nil { s.FailNow("failed to register file in database") } - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") if err != nil { s.FailNow("failed to update satus of file in database") } @@ -837,8 +867,8 @@ func (s *TestSuite) TestGinLogLevel_Debug() { logOutput = buf.String() lines = strings.Split(strings.TrimSpace(logOutput), "\n") - fmt.Println("lines : ", lines) - fmt.Println("len(lines) : ", len(lines)) + _, _ = fmt.Println("lines : ", lines) + _, _ = fmt.Println("len(lines) : ", len(lines)) if len(lines) > 1 { assert.NotContains(s.T(), logOutput[len(logOutput)-1], "[GIN]") } else { @@ -914,8 +944,8 @@ func (s *TestSuite) TestGinLogLevel_Info() { logOutput = buf.String() lines = strings.Split(strings.TrimSpace(logOutput), "\n") - fmt.Println("lines : ", lines) - fmt.Println("len(lines) : ", len(lines)) + _, _ = fmt.Println("lines : ", lines) + _, _ = fmt.Println("len(lines) : ", len(lines)) if len(lines) > 1 { assert.NotContains(s.T(), logOutput[len(logOutput)-1], "[GIN]") } else { @@ -1038,7 +1068,8 @@ func (s *TestSuite) TestRBAC_emptyPolicy() { assert.Equal(s.T(), http.StatusUnauthorized, okResponse.StatusCode) assert.Contains(s.T(), string(b), "not authorized") } -func (s *TestSuite) TestIngestFile() { + +func (s *TestSuite) TestIngestFile_WithPayload() { user := "dummy" filePath := "/inbox/dummy/file10.c4gh" m, err := 
model.NewModelFromString(jsonadapter.Model) @@ -1052,9 +1083,9 @@ func (s *TestSuite) TestIngestFile() { s.FailNow("failed to setup RBAC enforcer") } - fileID, err := Conf.API.DB.RegisterFile(filePath, user) + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") assert.NoError(s.T(), err, "failed to update satus of file in database") gin.SetMode(gin.ReleaseMode) @@ -1090,25 +1121,34 @@ func (s *TestSuite) TestIngestFile() { MessagesReady int `json:"messages_ready"` } body, err := io.ReadAll(res.Body) - res.Body.Close() + _ = res.Body.Close() assert.NoError(s.T(), err, "failed to read response from broker") err = json.Unmarshal(body, &data) assert.NoError(s.T(), err, "failed to unmarshal response") assert.Equal(s.T(), 1, data.MessagesReady) } -func (s *TestSuite) TestIngestFile_NoUser() { +func (s *TestSuite) TestIngestFile_WithPayload_NoUser() { user := "dummy" filePath := "/inbox/dummy/file10.c4gh" + m, err := model.NewModelFromString(jsonadapter.Model) + if err != nil { + s.T().Logf("failure: %v", err) + s.FailNow("failed to setup RBAC model") + } + e, err := casbin.NewEnforcer(m, jsonadapter.NewAdapter(&s.RBAC)) + if err != nil { + s.T().Logf("failure: %v", err) + s.FailNow("failed to setup RBAC enforcer") + } - fileID, err := Conf.API.DB.RegisterFile(filePath, user) + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") assert.NoError(s.T(), err, "failed to update satus of file in database") gin.SetMode(gin.ReleaseMode) assert.NoError(s.T(), setupJwtAuth()) - Conf.Broker.SchemasPath = 
"../../schemas/isolated" type ingest struct { @@ -1119,27 +1159,30 @@ func (s *TestSuite) TestIngestFile_NoUser() { // Mock request and response holders w := httptest.NewRecorder() r := httptest.NewRequest(http.MethodPost, "/file/ingest", bytes.NewBuffer(ingestMsg)) + r.Header.Add("Authorization", "Bearer "+s.Token) _, router := gin.CreateTestContext(w) - router.POST("/file/ingest", ingestFile) + router.POST("/file/ingest", rbac(e), ingestFile) router.ServeHTTP(w, r) - okResponse := w.Result() - defer okResponse.Body.Close() - assert.Equal(s.T(), http.StatusBadRequest, okResponse.StatusCode) + resp := w.Result() + defer resp.Body.Close() + b, _ := io.ReadAll(resp.Body) + assert.Equal(s.T(), http.StatusBadRequest, resp.StatusCode) + assert.Contains(s.T(), string(b), "sql: no rows in result set") } -func (s *TestSuite) TestIngestFile_WrongUser() { + +func (s *TestSuite) TestIngestFile_WithPayload_WrongUser() { user := "dummy" filePath := "/inbox/dummy/file10.c4gh" - fileID, err := Conf.API.DB.RegisterFile(filePath, user) + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") assert.NoError(s.T(), err, "failed to update satus of file in database") gin.SetMode(gin.ReleaseMode) assert.NoError(s.T(), setupJwtAuth()) - Conf.Broker.SchemasPath = "../../schemas/isolated" type ingest struct { @@ -1155,10 +1198,10 @@ func (s *TestSuite) TestIngestFile_WrongUser() { router.POST("/file/ingest", ingestFile) router.ServeHTTP(w, r) - okResponse := w.Result() - defer okResponse.Body.Close() - b, _ := io.ReadAll(okResponse.Body) - assert.Equal(s.T(), http.StatusBadRequest, okResponse.StatusCode) + resp := w.Result() + defer resp.Body.Close() + b, _ := io.ReadAll(resp.Body) + assert.Equal(s.T(), http.StatusBadRequest, resp.StatusCode) 
assert.Contains(s.T(), string(b), "sql: no rows in result set") } @@ -1166,9 +1209,9 @@ func (s *TestSuite) TestIngestFile_WrongFilePath() { user := "dummy" filePath := "/inbox/dummy/file10.c4gh" - fileID, err := Conf.API.DB.RegisterFile(filePath, user) + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") assert.NoError(s.T(), err, "failed to update satus of file in database") gin.SetMode(gin.ReleaseMode) @@ -1190,76 +1233,277 @@ func (s *TestSuite) TestIngestFile_WrongFilePath() { router.POST("/file/ingest", ingestFile) router.ServeHTTP(w, r) + resp := w.Result() + defer resp.Body.Close() + b, _ := io.ReadAll(resp.Body) + assert.Equal(s.T(), http.StatusBadRequest, resp.StatusCode) + assert.Contains(s.T(), string(b), "sql: no rows in result set") +} + +func (s *TestSuite) TestIngestFile_WithFileID() { + user := "dummy" + filePath := "/inbox/dummy/file11.c4gh" + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) + assert.NoError(s.T(), err) + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") + assert.NoError(s.T(), err) + + gin.SetMode(gin.ReleaseMode) + assert.NoError(s.T(), setupJwtAuth()) + m, err := model.NewModelFromString(jsonadapter.Model) + assert.NoError(s.T(), err) + e, err := casbin.NewEnforcer(m, jsonadapter.NewAdapter(&s.RBAC)) + assert.NoError(s.T(), err) + + w := httptest.NewRecorder() + r := httptest.NewRequest("POST", "/file/ingest?fileid="+fileID, nil) + r.Header.Add("Authorization", "Bearer "+s.Token) + + _, router := gin.CreateTestContext(w) + router.POST("/file/ingest", rbac(e), ingestFile) + router.ServeHTTP(w, r) + okResponse := w.Result() defer okResponse.Body.Close() - b, _ := io.ReadAll(okResponse.Body) - assert.Equal(s.T(), http.StatusBadRequest, okResponse.StatusCode) - 
assert.Contains(s.T(), string(b), "sql: no rows in result set") + assert.Equal(s.T(), http.StatusOK, okResponse.StatusCode) + + // verify that the message shows up in the queue + time.Sleep(10 * time.Second) // this is needed to ensure we don't get any false negatives + client := http.Client{Timeout: 5 * time.Second} + req, _ := http.NewRequest(http.MethodGet, "http://"+BrokerAPI+"/api/queues/sda/ingest", http.NoBody) + req.SetBasicAuth("guest", "guest") + res, err := client.Do(req) + assert.NoError(s.T(), err, "failed to query broker") + var data struct { + MessagesReady int `json:"messages_ready"` + } + body, err := io.ReadAll(res.Body) + _ = res.Body.Close() + assert.NoError(s.T(), err, "failed to read response from broker") + err = json.Unmarshal(body, &data) + assert.NoError(s.T(), err, "failed to unmarshal response") + assert.Equal(s.T(), 1, data.MessagesReady) } -func (s *TestSuite) TestSetAccession() { +func (s *TestSuite) TestIngestFile_WithFileID_WrongID() { user := "dummy" filePath := "/inbox/dummy/file11.c4gh" + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) + assert.NoError(s.T(), err) + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") + assert.NoError(s.T(), err) - fileID, err := Conf.API.DB.RegisterFile(filePath, user) - assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") - assert.NoError(s.T(), err, "failed to update satus of file in database") + gin.SetMode(gin.ReleaseMode) + assert.NoError(s.T(), setupJwtAuth()) - encSha := sha256.New() - _, err = encSha.Write([]byte("Checksum")) + w := httptest.NewRecorder() + r := httptest.NewRequest("POST", "/file/ingest?fileid=random-1234", nil) + r.Header.Add("Authorization", "Bearer "+s.Token) + + _, router := gin.CreateTestContext(w) + router.POST("/file/ingest", ingestFile) + router.ServeHTTP(w, r) + + resp := w.Result() + defer resp.Body.Close() + b, _ := 
io.ReadAll(resp.Body) + assert.Equal(s.T(), http.StatusBadRequest, resp.StatusCode) + assert.Contains(s.T(), string(b), "file information not found") +} + +func (s *TestSuite) TestIngestFile_BothFileIDAndPayloadProvided() { + user := "dummy" + filePath := "/inbox/dummy/file12.c4gh" + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) + assert.NoError(s.T(), err) + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") assert.NoError(s.T(), err) - decSha := sha256.New() - _, err = decSha.Write([]byte("DecryptedChecksum")) + gin.SetMode(gin.ReleaseMode) + assert.NoError(s.T(), setupJwtAuth()) + m, err := model.NewModelFromString(jsonadapter.Model) + assert.NoError(s.T(), err) + e, err := casbin.NewEnforcer(m, jsonadapter.NewAdapter(&s.RBAC)) assert.NoError(s.T(), err) - fileInfo := database.FileInfo{ - UploadedChecksum: fmt.Sprintf("%x", encSha.Sum(nil)), - Size: 1000, - Path: filePath, - DecryptedChecksum: fmt.Sprintf("%x", decSha.Sum(nil)), - DecryptedSize: 948, - } - err = Conf.API.DB.SetArchived(fileInfo, fileID) - assert.NoError(s.T(), err, "failed to mark file as Archived") + w := httptest.NewRecorder() + payload, _ := json.Marshal(map[string]string{ + "user": user, + "filepath": filePath, + }) + r := httptest.NewRequest("POST", "/file/ingest?fileid="+fileID, bytes.NewBuffer(payload)) + r.Header.Add("Authorization", "Bearer "+s.Token) + r.Header.Set("Content-Type", "application/json") - err = Conf.API.DB.SetVerified(fileInfo, fileID) - assert.NoError(s.T(), err, "got (%v) when marking file as verified", err) + _, router := gin.CreateTestContext(w) + router.POST("/file/ingest", rbac(e), ingestFile) + router.ServeHTTP(w, r) + + resp := w.Result() + defer resp.Body.Close() + body, _ := io.ReadAll(resp.Body) + assert.Equal(s.T(), http.StatusBadRequest, resp.StatusCode) + assert.Contains(s.T(), string(body), "both file ID parameter and payload provided") +} + +func (s *TestSuite) TestIngestFile_NoFileIDnoPayload() { + w := 
httptest.NewRecorder() + r := httptest.NewRequest("POST", "/file/ingest", nil) + r.Header.Add("Authorization", "Bearer "+s.Token) + + _, router := gin.CreateTestContext(w) + router.POST("/file/ingest", ingestFile) + + router.ServeHTTP(w, r) + okResponse := w.Result() + defer okResponse.Body.Close() + assert.Equal(s.T(), http.StatusBadRequest, okResponse.StatusCode) +} + +func (s *TestSuite) TestSetAccession_WithPayload() { + user := "dummy" + filePath := "/inbox/dummy_folder/dummyfile.c4gh" + accessionID := "accession-id-01" + _, _ = helperCreateVerifiedTestFile(s, user, filePath) gin.SetMode(gin.ReleaseMode) assert.NoError(s.T(), setupJwtAuth()) - Conf.Broker.SchemasPath = "../../schemas/isolated" m, err := model.NewModelFromString(jsonadapter.Model) - if err != nil { - s.T().Logf("failure: %v", err) - s.FailNow("failed to setup RBAC model") - } + assert.NoError(s.T(), err) e, err := casbin.NewEnforcer(m, jsonadapter.NewAdapter(&s.RBAC)) - if err != nil { - s.T().Logf("failure: %v", err) - s.FailNow("failed to setup RBAC enforcer") - } + assert.NoError(s.T(), err) - type accession struct { - AccessionID string `json:"accession_id"` - FilePath string `json:"filepath"` - User string `json:"user"` + payload, _ := json.Marshal(map[string]string{ + "user": user, + "filepath": filePath, + "accession_id": accessionID, + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodPost, "/file/accession", bytes.NewBuffer(payload)) + r.Header.Add("Authorization", "Bearer "+s.Token) + r.Header.Set("Content-Type", "application/json") + + _, router := gin.CreateTestContext(w) + router.POST("/file/accession", rbac(e), setAccession) + router.ServeHTTP(w, r) + + resp := w.Result() + defer resp.Body.Close() + assert.Equal(s.T(), http.StatusOK, resp.StatusCode) + + // verify that the message shows up in the queue + time.Sleep(10 * time.Second) // this is needed to ensure we don't get any false negatives + client := http.Client{Timeout: 5 * time.Second} + req, _ := 
http.NewRequest(http.MethodGet, "http://"+BrokerAPI+"/api/queues/sda/accession", http.NoBody) + req.SetBasicAuth("guest", "guest") + res, err := client.Do(req) + assert.NoError(s.T(), err, "failed to query broker") + var data struct { + MessagesReady int `json:"messages_ready"` } - aID := "API:accession-id-01" - accessionMsg, _ := json.Marshal(accession{AccessionID: aID, FilePath: filePath, User: user}) - // Mock request and response holders + body, err := io.ReadAll(res.Body) + _ = res.Body.Close() + assert.NoError(s.T(), err, "failed to read response from broker") + err = json.Unmarshal(body, &data) + assert.NoError(s.T(), err, "failed to unmarshal response") + assert.Equal(s.T(), 1, data.MessagesReady) +} + +func (s *TestSuite) TestSetAccession_WithPayload_WrongUser() { + user := "dummy" + filePath := "/inbox/dummy_folder/dummyfile.c4gh" + accessionID := "accession-id-01" + _, _ = helperCreateVerifiedTestFile(s, user, filePath) + + gin.SetMode(gin.ReleaseMode) + assert.NoError(s.T(), setupJwtAuth()) + m, err := model.NewModelFromString(jsonadapter.Model) + assert.NoError(s.T(), err) + e, err := casbin.NewEnforcer(m, jsonadapter.NewAdapter(&s.RBAC)) + assert.NoError(s.T(), err) + + payload, _ := json.Marshal(map[string]string{ + "user": "Foo-bar", + "filepath": filePath, + "accession_id": accessionID, + }) + w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodPost, "/file/accession", bytes.NewBuffer(accessionMsg)) + r := httptest.NewRequest(http.MethodPost, "/file/accession", bytes.NewBuffer(payload)) r.Header.Add("Authorization", "Bearer "+s.Token) + r.Header.Set("Content-Type", "application/json") _, router := gin.CreateTestContext(w) router.POST("/file/accession", rbac(e), setAccession) + router.ServeHTTP(w, r) + + resp := w.Result() + defer resp.Body.Close() + b, _ := io.ReadAll(resp.Body) + assert.Equal(s.T(), http.StatusBadRequest, resp.StatusCode) + assert.Contains(s.T(), string(b), "sql: no rows in result set") +} + +func (s *TestSuite) 
TestSetAccession_WithPayload_WrongPath() { + user := "dummy" + filePath := "/inbox/dummy_folder/dummyfile.c4gh" + accessionID := "accession-id-01" + _, _ = helperCreateVerifiedTestFile(s, user, filePath) + + gin.SetMode(gin.ReleaseMode) + assert.NoError(s.T(), setupJwtAuth()) + m, err := model.NewModelFromString(jsonadapter.Model) + assert.NoError(s.T(), err) + e, err := casbin.NewEnforcer(m, jsonadapter.NewAdapter(&s.RBAC)) + assert.NoError(s.T(), err) + payload, _ := json.Marshal(map[string]string{ + "user": user, + "filepath": "/inbox/random/path/foo.c4gh", + "accession_id": accessionID, + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodPost, "/file/accession", bytes.NewBuffer(payload)) + r.Header.Add("Authorization", "Bearer "+s.Token) + r.Header.Set("Content-Type", "application/json") + + _, router := gin.CreateTestContext(w) + router.POST("/file/accession", rbac(e), setAccession) router.ServeHTTP(w, r) - okResponse := w.Result() - defer okResponse.Body.Close() - assert.Equal(s.T(), http.StatusOK, okResponse.StatusCode) + + resp := w.Result() + defer resp.Body.Close() + b, _ := io.ReadAll(resp.Body) + assert.Equal(s.T(), http.StatusBadRequest, resp.StatusCode) + assert.Contains(s.T(), string(b), "sql: no rows in result set") +} + +func (s *TestSuite) TestSetAccession_WithParams() { + user := "dummy" + filePath := "/inbox/dummy_folder/dummyfile.c4gh" + accessionID := "accession-id-01" + fileID, _ := helperCreateVerifiedTestFile(s, user, filePath) + + gin.SetMode(gin.ReleaseMode) + assert.NoError(s.T(), setupJwtAuth()) + m, err := model.NewModelFromString(jsonadapter.Model) + assert.NoError(s.T(), err) + e, err := casbin.NewEnforcer(m, jsonadapter.NewAdapter(&s.RBAC)) + assert.NoError(s.T(), err) + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodPost, "/file/accession?fileid="+fileID+"&accessionid="+accessionID, nil) + r.Header.Add("Authorization", "Bearer "+s.Token) + + _, router := gin.CreateTestContext(w) + 
router.POST("/file/accession", rbac(e), setAccession) + router.ServeHTTP(w, r) + + resp := w.Result() + defer resp.Body.Close() + assert.Equal(s.T(), http.StatusOK, resp.StatusCode) // verify that the message shows up in the queue time.Sleep(10 * time.Second) // this is needed to ensure we don't get any false negatives @@ -1272,47 +1516,99 @@ func (s *TestSuite) TestSetAccession() { MessagesReady int `json:"messages_ready"` } body, err := io.ReadAll(res.Body) - res.Body.Close() + _ = res.Body.Close() assert.NoError(s.T(), err, "failed to read response from broker") err = json.Unmarshal(body, &data) assert.NoError(s.T(), err, "failed to unmarshal response") assert.Equal(s.T(), 1, data.MessagesReady) } -func (s *TestSuite) TestSetAccession_WrongUser() { +func (s *TestSuite) TestSetAccession_WithParams_WrongID() { + user := "dummy" + filePath := "/inbox/dummy_folder/dummyfile.c4gh" + accessionID := "accession-id-01" + _, _ = helperCreateVerifiedTestFile(s, user, filePath) + gin.SetMode(gin.ReleaseMode) assert.NoError(s.T(), setupJwtAuth()) - Conf.Broker.SchemasPath = "../../schemas/isolated" m, err := model.NewModelFromString(jsonadapter.Model) - if err != nil { - s.T().Logf("failure: %v", err) - s.FailNow("failed to setup RBAC model") - } + assert.NoError(s.T(), err) e, err := casbin.NewEnforcer(m, jsonadapter.NewAdapter(&s.RBAC)) - if err != nil { - s.T().Logf("failure: %v", err) - s.FailNow("failed to setup RBAC enforcer") - } + assert.NoError(s.T(), err) - type accession struct { - AccessionID string `json:"accession_id"` - FilePath string `json:"filepath"` - User string `json:"user"` - } - aID := "API:accession-id-01" - accessionMsg, _ := json.Marshal(accession{AccessionID: aID, FilePath: "/inbox/dummy/file11.c4gh", User: "fooBar"}) - // Mock request and response holders w := httptest.NewRecorder() - r := httptest.NewRequest(http.MethodPost, "/file/accession", bytes.NewBuffer(accessionMsg)) + r := httptest.NewRequest(http.MethodPost, 
"/file/accession?fileid=randomID-1234&accessionid="+accessionID, nil) + r.Header.Add("Authorization", "Bearer "+s.Token) + + _, router := gin.CreateTestContext(w) + router.POST("/file/accession", rbac(e), setAccession) + router.ServeHTTP(w, r) + + resp := w.Result() + defer resp.Body.Close() + b, _ := io.ReadAll(resp.Body) + assert.Equal(s.T(), http.StatusBadRequest, resp.StatusCode) + assert.Contains(s.T(), string(b), "file details not found") +} + +func (s *TestSuite) TestSetAccession_WithParams_MissingAccession() { + user := "dummy" + filePath := "/inbox/dummy_folder/dummyfile.c4gh" + fileID, _ := helperCreateVerifiedTestFile(s, user, filePath) + + gin.SetMode(gin.ReleaseMode) + assert.NoError(s.T(), setupJwtAuth()) + m, err := model.NewModelFromString(jsonadapter.Model) + assert.NoError(s.T(), err) + e, err := casbin.NewEnforcer(m, jsonadapter.NewAdapter(&s.RBAC)) + assert.NoError(s.T(), err) + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodPost, "/file/accession?fileid="+fileID, nil) r.Header.Add("Authorization", "Bearer "+s.Token) _, router := gin.CreateTestContext(w) router.POST("/file/accession", rbac(e), setAccession) + router.ServeHTTP(w, r) + + resp := w.Result() + defer resp.Body.Close() + b, _ := io.ReadAll(resp.Body) + assert.Equal(s.T(), http.StatusBadRequest, resp.StatusCode) + assert.Contains(s.T(), string(b), "accessionid is not provided") +} + +func (s *TestSuite) TestSetAccession_BothPayloadAndParamsProvided() { + user := "dummy" + filePath := "/inbox/dummy_folder/dummyfile.c4gh" + accessionID := "accession-id-01" + fileID, _ := helperCreateVerifiedTestFile(s, user, filePath) + + gin.SetMode(gin.ReleaseMode) + assert.NoError(s.T(), setupJwtAuth()) + m, err := model.NewModelFromString(jsonadapter.Model) + assert.NoError(s.T(), err) + e, err := casbin.NewEnforcer(m, jsonadapter.NewAdapter(&s.RBAC)) + assert.NoError(s.T(), err) + payload, _ := json.Marshal(map[string]string{ + "user": user, + "filepath": filePath, + 
"accession_id": accessionID, + }) + + w := httptest.NewRecorder() + r := httptest.NewRequest(http.MethodPost, fmt.Sprintf("/file/accession?fileid=%s&accessionid=%s", fileID, accessionID), bytes.NewBuffer(payload)) + r.Header.Add("Authorization", "Bearer "+s.Token) + r.Header.Set("Content-Type", "application/json") + + _, router := gin.CreateTestContext(w) + router.POST("/file/accession", rbac(e), setAccession) router.ServeHTTP(w, r) - okResponse := w.Result() - defer okResponse.Body.Close() - assert.Equal(s.T(), http.StatusBadRequest, okResponse.StatusCode) + + resp := w.Result() + defer resp.Body.Close() + assert.Equal(s.T(), http.StatusBadRequest, resp.StatusCode) } func (s *TestSuite) TestSetAccession_WrongFormat() { @@ -1355,10 +1651,10 @@ func (s *TestSuite) TestCreateDataset() { user := "dummy" filePath := "/inbox/dummy/file12.c4gh" - fileID, err := Conf.API.DB.RegisterFile(filePath, user) + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") - assert.NoError(s.T(), err, "failed to update satus of file in database") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") + assert.NoError(s.T(), err, "failed to update status of file in database") encSha := sha256.New() _, err = encSha.Write([]byte("Checksum")) @@ -1382,7 +1678,10 @@ func (s *TestSuite) TestCreateDataset() { assert.NoError(s.T(), err, "got (%v) when marking file as verified", err) err = Conf.API.DB.SetAccessionID("API:accession-id-11", fileID) - assert.NoError(s.T(), err, "got (%v) when marking file as verified", err) + assert.NoError(s.T(), err, "got (%v) when marking file accession", err) + + err = Conf.API.DB.UpdateFileEventLog(fileID, "ready", user, "{}", "{}") + assert.NoError(s.T(), err, "got (%v) when setting file status ready", err) gin.SetMode(gin.ReleaseMode) assert.NoError(s.T(), setupJwtAuth()) @@ -1424,7 
+1723,7 @@ func (s *TestSuite) TestCreateDataset() { MessagesReady int `json:"messages_ready"` } body, err := io.ReadAll(res.Body) - res.Body.Close() + _ = res.Body.Close() assert.NoError(s.T(), err, "failed to read response from broker") assert.NoError(s.T(), json.Unmarshal(body, &data), "failed to unmarshal response") assert.Equal(s.T(), 1, data.MessagesReady) @@ -1434,9 +1733,9 @@ func (s *TestSuite) TestCreateDataset_BadFormat() { user := "dummy" filePath := "/inbox/dummy/file12.c4gh" - fileID, err := Conf.API.DB.RegisterFile(filePath, user) + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") assert.NoError(s.T(), err, "failed to update satus of file in database") encSha := sha256.New() @@ -1466,7 +1765,7 @@ func (s *TestSuite) TestCreateDataset_BadFormat() { err = Conf.API.DB.SetAccessionID("API:accession-id-11", fileID) assert.NoError(s.T(), err, "got (%v) when marking file as verified", err) - err = Conf.API.DB.UpdateFileEventLog(fileID, "ready", fileID, "finalize", "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "ready", "finalize", "{}", "{}") assert.NoError(s.T(), err, "got (%v) when marking file as ready", err) gin.SetMode(gin.ReleaseMode) @@ -1497,7 +1796,7 @@ func (s *TestSuite) TestCreateDataset_BadFormat() { response := w.Result() body, err := io.ReadAll(response.Body) assert.NoError(s.T(), err) - response.Body.Close() + _ = response.Body.Close() assert.Equal(s.T(), http.StatusBadRequest, response.StatusCode) assert.Contains(s.T(), string(body), "does not match pattern") @@ -1522,7 +1821,7 @@ func (s *TestSuite) TestCreateDataset_MissingAccessionIDs() { response := w.Result() body, err := io.ReadAll(response.Body) assert.NoError(s.T(), err) - response.Body.Close() + _ = response.Body.Close() 
assert.Equal(s.T(), http.StatusBadRequest, response.StatusCode) assert.Contains(s.T(), string(body), "at least one accessionID is required") @@ -1546,19 +1845,19 @@ func (s *TestSuite) TestCreateDataset_WrongIDs() { response := w.Result() body, err := io.ReadAll(response.Body) assert.NoError(s.T(), err) - response.Body.Close() + _ = response.Body.Close() assert.Equal(s.T(), http.StatusBadRequest, response.StatusCode) - assert.Contains(s.T(), string(body), "accession ID not found: ") + assert.Contains(s.T(), string(body), "accession ID: API:accession-id-11 not found or owned by other user") } func (s *TestSuite) TestCreateDataset_WrongUser() { user := "dummy" filePath := "/inbox/dummy/file12.c4gh" - fileID, err := Conf.API.DB.RegisterFile(filePath, user) + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") assert.NoError(s.T(), err, "failed to update satus of file in database") encSha := sha256.New() @@ -1605,21 +1904,21 @@ func (s *TestSuite) TestCreateDataset_WrongUser() { response := w.Result() body, err := io.ReadAll(response.Body) assert.NoError(s.T(), err) - response.Body.Close() + _ = response.Body.Close() assert.Equal(s.T(), http.StatusBadRequest, response.StatusCode) - assert.Contains(s.T(), string(body), "accession ID owned by other user") + assert.Contains(s.T(), string(body), "accession ID: API:accession-id-11 not found or owned by other user") } func (s *TestSuite) TestReleaseDataset() { user := "TestReleaseDataset" for i := 0; i < 3; i++ { - fileID, err := Conf.API.DB.RegisterFile(fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", user, i), strings.ReplaceAll(user, "_", "@")) + fileID, err := Conf.API.DB.RegisterFile(nil, fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", user, i), strings.ReplaceAll(user, "_", "@")) if err != 
nil { s.FailNow("failed to register file in database") } - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") if err != nil { s.FailNow("failed to update satus of file in database") } @@ -1676,7 +1975,7 @@ func (s *TestSuite) TestReleaseDataset() { MessagesReady int `json:"messages_ready"` } body, err := io.ReadAll(res.Body) - res.Body.Close() + _ = res.Body.Close() assert.NoError(s.T(), err, "failed to read response from broker") err = json.Unmarshal(body, &data) assert.NoError(s.T(), err, "failed to unmarshal response") @@ -1747,12 +2046,12 @@ func (s *TestSuite) TestReleaseDataset_DeprecatedDataset() { testUsers := []string{"user_example.org", "User-B", "User-C"} for _, user := range testUsers { for i := 0; i < 5; i++ { - fileID, err := Conf.API.DB.RegisterFile(fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", user, i), strings.ReplaceAll(user, "_", "@")) + fileID, err := Conf.API.DB.RegisterFile(nil, fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", user, i), strings.ReplaceAll(user, "_", "@")) if err != nil { s.FailNow("failed to register file in database") } - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") if err != nil { s.FailNow("failed to update satus of file in database") } @@ -1806,12 +2105,12 @@ func (s *TestSuite) TestListActiveUsers() { testUsers := []string{"User-A", "User-B", "User-C"} for _, user := range testUsers { for i := 0; i < 3; i++ { - fileID, err := Conf.API.DB.RegisterFile(fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", user, i), user) + fileID, err := Conf.API.DB.RegisterFile(nil, fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", user, i), user) if err != nil { s.FailNow("failed to register file in database") } - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = 
Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") if err != nil { s.FailNow("failed to update satus of file in database") } @@ -1865,12 +2164,12 @@ func (s *TestSuite) TestListUserFiles() { testUsers := []string{"user_example.org", "User-B", "User-C"} for _, user := range testUsers { for i := 0; i < 5; i++ { - fileID, err := Conf.API.DB.RegisterFile(fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", user, i), strings.ReplaceAll(user, "_", "@")) + fileID, err := Conf.API.DB.RegisterFile(nil, fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", user, i), strings.ReplaceAll(user, "_", "@")) if err != nil { s.FailNow("failed to register file in database") } - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") if err != nil { s.FailNow("failed to update satus of file in database") } @@ -1931,12 +2230,12 @@ func (s *TestSuite) TestListUserFiles_filteredSelection() { sub = "submission_b" } - fileID, err := Conf.API.DB.RegisterFile(fmt.Sprintf("%s/TestGetUserFiles-00%d.c4gh", sub, i), strings.ReplaceAll(user, "_", "@")) + fileID, err := Conf.API.DB.RegisterFile(nil, fmt.Sprintf("%s/TestGetUserFiles-00%d.c4gh", sub, i), strings.ReplaceAll(user, "_", "@")) if err != nil { s.FailNow("failed to register file in database") } - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") if err != nil { s.FailNow("failed to update satus of file in database") } @@ -2192,7 +2491,7 @@ func (s *TestSuite) TestDeprecateC4ghHash_wrongHash() { func (s *TestSuite) TestListDatasets() { for i := 0; i < 5; i++ { - fileID, err := Conf.API.DB.RegisterFile(fmt.Sprintf("/dummy/TestGetUserFiles-00%d.c4gh", i), "dummy") + fileID, err := Conf.API.DB.RegisterFile(nil, fmt.Sprintf("/dummy/TestGetUserFiles-00%d.c4gh", i), "dummy") if err != nil { s.FailNow("failed to register file in 
database") } @@ -2250,7 +2549,7 @@ func (s *TestSuite) TestListDatasets() { func (s *TestSuite) TestListUserDatasets() { for i := 0; i < 5; i++ { - fileID, err := Conf.API.DB.RegisterFile(fmt.Sprintf("/user_example.org/TestGetUserFiles-00%d.c4gh", i), strings.ReplaceAll("user_example.org", "_", "@")) + fileID, err := Conf.API.DB.RegisterFile(nil, fmt.Sprintf("/user_example.org/TestGetUserFiles-00%d.c4gh", i), strings.ReplaceAll("user_example.org", "_", "@")) if err != nil { s.FailNow("failed to register file in database") } @@ -2307,7 +2606,7 @@ func (s *TestSuite) TestListUserDatasets() { func (s *TestSuite) TestListDatasetsAsUser() { for i := 0; i < 5; i++ { - fileID, err := Conf.API.DB.RegisterFile(fmt.Sprintf("/user_example.org/TestGetUserFiles-00%d.c4gh", i), s.User) + fileID, err := Conf.API.DB.RegisterFile(nil, fmt.Sprintf("/user_example.org/TestGetUserFiles-00%d.c4gh", i), s.User) if err != nil { s.FailNow("failed to register file in database") } @@ -2366,12 +2665,12 @@ func (s *TestSuite) TestReVerifyFile() { user := "TestReVerify" for i := 0; i < 3; i++ { filePath := fmt.Sprintf("/%v/TestReVerify-00%d.c4gh", user, i) - fileID, err := Conf.API.DB.RegisterFile(filePath, user) + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) if err != nil { s.FailNow("failed to register file in database") } - if err := Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}"); err != nil { + if err := Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}"); err != nil { s.FailNow("failed to update satus of file in database") } encSha := sha256.New() @@ -2406,7 +2705,7 @@ func (s *TestSuite) TestReVerifyFile() { if err := Conf.API.DB.SetAccessionID(stableID, fileID); err != nil { s.FailNowf("got (%s) when setting stable ID: %s, %s", err.Error(), stableID, fileID) } - if err := Conf.API.DB.UpdateFileEventLog(fileID, "ready", fileID, "finalize", "{}", "{}"); err != nil { + if err := Conf.API.DB.UpdateFileEventLog(fileID, "ready", 
"finalize", "{}", "{}"); err != nil { s.FailNowf("got (%s) when updating file status: %s", err.Error(), filePath) } } @@ -2438,7 +2737,7 @@ func (s *TestSuite) TestReVerifyFile() { MessagesReady int `json:"messages_ready"` } body, err := io.ReadAll(res.Body) - res.Body.Close() + _ = res.Body.Close() assert.NoError(s.T(), err, "failed to read response from broker") err = json.Unmarshal(body, &data) assert.NoError(s.T(), err, "failed to unmarshal response") @@ -2468,12 +2767,12 @@ func (s *TestSuite) TestReVerifyDataset() { user := "TestReVerifyDataset" for i := 0; i < 3; i++ { filePath := fmt.Sprintf("/%v/TestReVerifyDataset-00%d.c4gh", user, i) - fileID, err := Conf.API.DB.RegisterFile(filePath, user) + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, user) if err != nil { s.FailNow("failed to register file in database") } - if err := Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}"); err != nil { + if err := Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}"); err != nil { s.FailNow("failed to update satus of file in database") } encSha := sha256.New() @@ -2508,7 +2807,7 @@ func (s *TestSuite) TestReVerifyDataset() { if err := Conf.API.DB.SetAccessionID(stableID, fileID); err != nil { s.FailNowf("got (%s) when setting stable ID: %s, %s", err.Error(), stableID, fileID) } - if err := Conf.API.DB.UpdateFileEventLog(fileID, "ready", fileID, "finalize", "{}", "{}"); err != nil { + if err := Conf.API.DB.UpdateFileEventLog(fileID, "ready", "finalize", "{}", "{}"); err != nil { s.FailNowf("got (%s) when updating file status: %s", err.Error(), filePath) } } @@ -2545,7 +2844,7 @@ func (s *TestSuite) TestReVerifyDataset() { MessagesReady int `json:"messages_ready"` } body, err := io.ReadAll(res.Body) - res.Body.Close() + _ = res.Body.Close() assert.NoError(s.T(), err, "failed to read response from broker") err = json.Unmarshal(body, &data) assert.NoError(s.T(), err, "failed to unmarshal response") @@ -2588,9 +2887,9 @@ func 
(s *TestSuite) TestDownloadFile() { defer ts.Close() // Register the file in the database - fileID, err := Conf.API.DB.RegisterFile(filepath.Base(s.GoodC4ghFile), s.User) + fileID, err := Conf.API.DB.RegisterFile(nil, filepath.Base(s.GoodC4ghFile), s.User) assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, s.User, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", s.User, "{}", "{}") assert.NoError(s.T(), err, "failed to update satus of file in database") // Mock request to download the file @@ -2675,9 +2974,9 @@ func (s *TestSuite) TestDownloadFile_fileNotExist() { // Register a file in the database (but don't create the actual file) filePath := fmt.Sprintf("/%v/nonexistent.c4gh", s.User) - fileID, err := Conf.API.DB.RegisterFile(filePath, s.User) + fileID, err := Conf.API.DB.RegisterFile(nil, filePath, s.User) assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, s.User, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", s.User, "{}", "{}") assert.NoError(s.T(), err, "failed to update satus of file in database") // Mock request to download the file @@ -2707,9 +3006,9 @@ func (s *TestSuite) TestDownloadFile_badC4ghFile() { defer ts.Close() // Register a file in the database (but don't create the actual file) - fileID, err := Conf.API.DB.RegisterFile(filepath.Base(s.BadC4ghFile), s.User) + fileID, err := Conf.API.DB.RegisterFile(nil, filepath.Base(s.BadC4ghFile), s.User) assert.NoError(s.T(), err, "failed to register file in database") - err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", fileID, s.User, "{}", "{}") + err = Conf.API.DB.UpdateFileEventLog(fileID, "uploaded", s.User, "{}", "{}") assert.NoError(s.T(), err, "failed to update satus of file in database") // Mock request to download the file diff --git a/sda/cmd/api/swagger_v1.yml 
b/sda/cmd/api/swagger_v1.yml index 4904de349..bea8b78f5 100644 --- a/sda/cmd/api/swagger_v1.yml +++ b/sda/cmd/api/swagger_v1.yml @@ -164,34 +164,67 @@ paths: description: Internal application error /file/accession: post: - description: Assigns accession ID to a given file. + description: | + Assigns accession ID to a given file. + You can provide either a JSON payload with `user`, `filepath`, and `accession_id`, + or query parameters `fileid` and `accessionid`. + If both payload and parameters are provided, a 400 Bad Request is returned. + parameters: + - in: query + name: fileid + schema: + type: string + required: false + description: UUID of the file to accession. If provided, payload must be empty. + - in: query + name: accessionid + schema: + type: string + required: false + description: Accession ID to assign to the file. If provided, payload must be empty. requestBody: content: application/json: schema: $ref: "#/components/schemas/FileAccession" + required: false responses: "200": description: Successful operation. "400": - description: Bad payload + description: | + Bad request. Returned if both fileid/accessionid and payload are provided, or if payload is invalid, or if fileid is invalid. "401": description: Authentication failure. + "404": + description: Decrypted checksum not found. "500": description: Internal application error. /file/ingest: post: - description: Trigger ingestion of a given file. + description: | + Trigger ingestion of a given file. + You can provide either a JSON payload with `user` and `filepath`, or a `fileid` query parameter. + If both are provided, a 400 Bad Request is returned. + parameters: + - in: query + name: fileid + schema: + type: string + required: false + description: UUID of the file to ingest. If provided, payload must be empty. requestBody: content: application/json: schema: $ref: "#/components/schemas/FileIngest" + required: false responses: "200": description: Successful operation. 
"400": - description: Bad payload + description: | + Bad request. Returned if both fileid and payload are provided, or if payload is invalid, or if fileid is invalid. "401": description: Authentication failure. "500": @@ -445,4 +478,4 @@ components: scheme: bearer bearerFormat: JWT security: - - bearerAuth: [] \ No newline at end of file + - bearerAuth: [] diff --git a/sda/cmd/auth/info_test.go b/sda/cmd/auth/info_test.go index ae46fc5ad..809c3e076 100644 --- a/sda/cmd/auth/info_test.go +++ b/sda/cmd/auth/info_test.go @@ -53,5 +53,5 @@ func (ts *InfoTests) TestReadPublicKeyFile() { } func (ts *InfoTests) TearDownTest() { - os.RemoveAll(ts.TempDir) + _ = os.RemoveAll(ts.TempDir) } diff --git a/sda/cmd/auth/jwt_test.go b/sda/cmd/auth/jwt_test.go index 055df14b3..316f92ac7 100644 --- a/sda/cmd/auth/jwt_test.go +++ b/sda/cmd/auth/jwt_test.go @@ -53,7 +53,7 @@ func (ts *JWTTests) SetupTest() { } func (ts *JWTTests) TearDownTest() { - os.RemoveAll(ts.TempDir) + _ = os.RemoveAll(ts.TempDir) } func (ts *JWTTests) TestGenerateJwtToken() { diff --git a/sda/cmd/finalize/finalize.go b/sda/cmd/finalize/finalize.go index 8f7ca8acf..59bbabdc8 100644 --- a/sda/cmd/finalize/finalize.go +++ b/sda/cmd/finalize/finalize.go @@ -73,7 +73,7 @@ func main() { log.Fatal(err) } for delivered := range messages { - log.Debugf("Received a message (corr-id: %s, message: %s)", delivered.CorrelationId, delivered.Body) + log.Debugf("Received a message (correlation-id: %s, message: %s)", delivered.CorrelationId, delivered.Body) err := schema.ValidateJSON(fmt.Sprintf("%s/ingestion-accession.json", conf.Broker.SchemasPath), delivered.Body) if err != nil { log.Errorf("validation of incoming message (ingestion-accession) failed, correlation-id: %s, reason: %v ", delivered.CorrelationId, err) @@ -84,12 +84,13 @@ func main() { continue } + fileID := delivered.CorrelationId // we unmarshal the message in the validation step so this is safe to do _ = json.Unmarshal(delivered.Body, &message) // If the file 
has been canceled by the uploader, don't spend time working on it. - status, err := db.GetFileStatus(delivered.CorrelationId) + status, err := db.GetFileStatus(fileID) if err != nil { - log.Errorf("failed to get file status, correlation-id: %s, reason: %v", delivered.CorrelationId, err) + log.Errorf("failed to get file status, file-id: %s, reason: %v", fileID, err) if err := delivered.Nack(false, true); err != nil { log.Errorf("failed to Nack message, reason: %v", err) } @@ -99,24 +100,23 @@ func main() { switch status { case "disabled": - log.Infof("file with correlation-id: %s is disabled, aborting work", delivered.CorrelationId) + log.Infof("file with file-id: %s is disabled, aborting work", fileID) if err := delivered.Ack(false); err != nil { log.Errorf("Failed acking canceled work, reason: %v", err) } continue - case "verified": - case "enabled": + case "verified", "enabled": case "ready": - log.Infof("File with correlation-id: %s is already marked as ready.", delivered.CorrelationId) + log.Infof("File with file-id: %s is already marked as ready.", fileID) if err := delivered.Ack(false); err != nil { log.Errorf("Failed acking message, reason: %v", err) } continue default: - log.Warnf("file with correlation-id: %s is not verified yet, aborting work", delivered.CorrelationId) + log.Warnf("file with file-id: %s is not verified yet, aborting work", fileID) if err := delivered.Nack(false, true); err != nil { log.Errorf("Failed acking canceled work, reason: %v", err) } @@ -124,16 +124,6 @@ func main() { continue } - fileID, err := db.GetFileID(delivered.CorrelationId) - if err != nil { - log.Errorf("failed to get file-id for file with correlation-id: %s, reason: %v", delivered.CorrelationId, err) - if err := delivered.Nack(false, true); err != nil { - log.Errorf("failed to Nack message, reason: %v", err) - } - - continue - } - c := schema.IngestionCompletion{ User: message.User, FilePath: message.FilePath, @@ -170,7 +160,7 @@ func main() { body, _ := 
json.Marshal(fileError) // Send the message to an error queue so it can be analyzed. - if e := mq.SendMessage(delivered.CorrelationId, conf.Broker.Exchange, "error", body); e != nil { + if e := mq.SendMessage(fileID, conf.Broker.Exchange, "error", body); e != nil { log.Errorf("failed to publish message, reason: %v", err) } @@ -204,7 +194,7 @@ func main() { } // Mark file as "ready" - if err := db.UpdateFileEventLog(fileID, "ready", delivered.CorrelationId, "finalize", "{}", string(delivered.Body)); err != nil { + if err := db.UpdateFileEventLog(fileID, "ready", "finalize", "{}", string(delivered.Body)); err != nil { log.Errorf("set status ready failed, file-id: %s, reason: %v", fileID, err) if err := delivered.Nack(false, true); err != nil { log.Errorf("failed to Nack message, reason: %v", err) @@ -213,7 +203,7 @@ func main() { continue } - if err := mq.SendMessage(delivered.CorrelationId, conf.Broker.Exchange, conf.Broker.RoutingKey, completeMsg); err != nil { + if err := mq.SendMessage(fileID, conf.Broker.Exchange, conf.Broker.RoutingKey, completeMsg); err != nil { log.Errorf("failed to publish message, reason: %v", err) if err := delivered.Nack(false, true); err != nil { log.Errorf("failed to Nack message, reason: %v", err) @@ -233,10 +223,7 @@ func main() { func backupFile(delivered amqp.Delivery) error { log.Debug("Backup initiated") - fileID, err := db.GetFileID(delivered.CorrelationId) - if err != nil { - return fmt.Errorf("failed to get ID for file, reason: %s", err.Error()) - } + fileID := delivered.CorrelationId filePath, fileSize, err := db.GetArchived(fileID) if err != nil { @@ -272,7 +259,7 @@ func backupFile(delivered amqp.Delivery) error { } // Mark file as "backed up" - if err := db.UpdateFileEventLog(fileID, "backed up", delivered.CorrelationId, "finalize", "{}", string(delivered.Body)); err != nil { + if err := db.UpdateFileEventLog(fileID, "backed up", "finalize", "{}", string(delivered.Body)); err != nil { return fmt.Errorf("UpdateFileEventLog 
failed, reason: (%v)", err) } diff --git a/sda/cmd/ingest/ingest.go b/sda/cmd/ingest/ingest.go index 5c6492356..2c4a70271 100644 --- a/sda/cmd/ingest/ingest.go +++ b/sda/cmd/ingest/ingest.go @@ -123,7 +123,7 @@ func main() { } for delivered := range messages { - log.Debugf("received a message (corr-id: %s, message: %s)", delivered.CorrelationId, delivered.Body) + log.Debugf("received a message (correlation-id: %s, message: %s)", delivered.CorrelationId, delivered.Body) message := schema.IngestionTrigger{} err := schema.ValidateJSON(fmt.Sprintf("%s/ingestion-trigger.json", app.Conf.Broker.SchemasPath), delivered.Body) if err != nil { @@ -148,7 +148,7 @@ func main() { // we unmarshal the message in the validation step so this is safe to do _ = json.Unmarshal(delivered.Body, &message) - log.Infof("Received work (corr-id: %s, filepath: %s, user: %s)", delivered.CorrelationId, message.FilePath, message.User) + log.Infof("Received work (correlation-id: %s, filepath: %s, user: %s)", delivered.CorrelationId, message.FilePath, message.User) ackNack := "" switch message.Type { @@ -201,10 +201,10 @@ func (app *Ingest) registerC4GHKey() error { return nil } -func (app *Ingest) cancelFile(correlationID string, message schema.IngestionTrigger) string { - fileID, err := app.DB.GetFileID(correlationID) - if err != nil { - log.Errorf("failed to get file-id for file from message (correlation-id: %s), reason: %s", correlationID, err.Error()) +func (app *Ingest) cancelFile(fileID string, message schema.IngestionTrigger) string { + m, _ := json.Marshal(message) + if err := app.DB.UpdateFileEventLog(fileID, "disabled", "ingest", "{}", string(m)); err != nil { + log.Errorf("failed to update event log for file with id : %s", fileID) if strings.Contains(err.Error(), "sql: no rows in result set") { return "reject" } @@ -212,33 +212,19 @@ func (app *Ingest) cancelFile(correlationID string, message schema.IngestionTrig return "nack" } - m, _ := json.Marshal(message) - if err := 
app.DB.UpdateFileEventLog(fileID, "disabled", correlationID, "ingest", "{}", string(m)); err != nil { - log.Errorf("failed to update event log for file with id : %s", fileID) - - return "nack" - } - return "ack" } -func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrigger) string { - var fileID string - status, err := app.DB.GetFileStatus(correlationID) +func (app *Ingest) ingestFile(fileID string, message schema.IngestionTrigger) string { + status, err := app.DB.GetFileStatus(fileID) if err != nil && err.Error() != "sql: no rows in result set" { - log.Errorf("failed to get status for file, correlation-id: %s, reason: (%s)", correlationID, err.Error()) + log.Errorf("failed to get status for file, fileID: %s, reason: (%s)", fileID, err.Error()) return "nack" } switch status { case "disabled": - fileID, err = app.DB.GetFileID(correlationID) - if err != nil { - log.Errorf("failed to get file-id for file, correlation-id: %s, reason: %s", correlationID, err.Error()) - - return "nack" - } fileInfo, err := app.DB.GetFileInfo(fileID) if err != nil { @@ -257,7 +243,7 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig log.Errorf("Failed to open file to ingest, file-id: %s, inbox path: %s, reason: (%s)", fileID, message.FilePath, err.Error()) jsonMsg, _ := json.Marshal(map[string]string{"error": err.Error()}) m, _ := json.Marshal(message) - if err := app.DB.UpdateFileEventLog(fileID, "error", correlationID, "ingest", string(jsonMsg), string(m)); err != nil { + if err := app.DB.UpdateFileEventLog(fileID, "error", "ingest", string(jsonMsg), string(m)); err != nil { log.Errorf("failed to set error status for file from message, file-id: %s, reason: %s", fileID, err.Error()) } // Send the message to an error queue so it can be analyzed. 
@@ -267,7 +253,7 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig OriginalMessage: message, } body, _ := json.Marshal(fileError) - if err := app.MQ.SendMessage(correlationID, app.Conf.Broker.Exchange, "error", body); err != nil { + if err := app.MQ.SendMessage(fileID, app.Conf.Broker.Exchange, "error", body); err != nil { log.Errorf("failed to publish message, reason: %v", err) return "reject" @@ -307,13 +293,13 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig } m, _ := json.Marshal(message) - if err = app.DB.UpdateFileEventLog(fileInfo.Path, "enabled", correlationID, "ingest", "{}", string(m)); err != nil { + if err = app.DB.UpdateFileEventLog(fileInfo.Path, "enabled", "ingest", "{}", string(m)); err != nil { log.Errorf("failed to set ingestion status for file from message, file-id: %s", fileID) return "nack" } - if err := app.MQ.SendMessage(correlationID, app.Conf.Broker.Exchange, app.Conf.Broker.RoutingKey, archivedMsg); err != nil { + if err := app.MQ.SendMessage(fileID, app.Conf.Broker.Exchange, app.Conf.Broker.RoutingKey, archivedMsg); err != nil { log.Errorf("failed to publish message, reason: %v", err) return "reject" @@ -323,22 +309,17 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig } case "": // Catch all for implementations that don't update the DB, e.g. 
for those not using S3inbox or sftpInbox - log.Infof("registering file, correlation-id: %s", correlationID) - fileID, err = app.DB.RegisterFile(message.FilePath, message.User) + log.Infof("registering file, file-id: %s", fileID) + fileID, err = app.DB.RegisterFile(&fileID, message.FilePath, message.User) if err != nil { - log.Errorf("failed to register file, correlation-id: %s, reason: (%s)", correlationID, err.Error()) + log.Errorf("failed to register file, fileID: %s, reason: (%s)", fileID, err.Error()) return "nack" } case "uploaded": - fileID, err = app.DB.GetFileID(correlationID) - if err != nil { - log.Errorf("failed to get ID for file, correlation-id: %s, reason: %s", correlationID, err.Error()) - return "nack" - } default: - log.Warnf("unsupported file status: %s, correlation-id: %s", status, correlationID) + log.Warnf("unsupported file status: %s, file-id: %s", status, fileID) return "reject" } @@ -350,7 +331,7 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig log.Errorf("Failed to open file to ingest reason: (%s)", err.Error()) jsonMsg, _ := json.Marshal(map[string]string{"error": err.Error()}) m, _ := json.Marshal(message) - if err := app.DB.UpdateFileEventLog(fileID, "error", correlationID, "ingest", string(jsonMsg), string(m)); err != nil { + if err := app.DB.UpdateFileEventLog(fileID, "error", "ingest", string(jsonMsg), string(m)); err != nil { log.Errorf("failed to set error status for file from message, file-id: %s, reason: %s", fileID, err.Error()) } // Send the message to an error queue so it can be analyzed. 
@@ -360,7 +341,7 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig OriginalMessage: message, } body, _ := json.Marshal(fileError) - if err := app.MQ.SendMessage(correlationID, app.Conf.Broker.Exchange, "error", body); err != nil { + if err := app.MQ.SendMessage(fileID, app.Conf.Broker.Exchange, "error", body); err != nil { log.Errorf("failed to publish message, reason: %v", err) return "reject" @@ -389,7 +370,7 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig } m, _ := json.Marshal(message) - if err = app.DB.UpdateFileEventLog(fileID, "submitted", correlationID, "ingest", "{}", string(m)); err != nil { + if err = app.DB.UpdateFileEventLog(fileID, "submitted", "ingest", "{}", string(m)); err != nil { log.Errorf("failed to set ingestion status for file from message, file-id: %s, reason: %s", fileID, err.Error()) } @@ -444,7 +425,7 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig if privateKey == nil { log.Errorf("All keys failed to decrypt the submitted file, file-id: %s", fileID) m, _ := json.Marshal(message) - if err := app.DB.UpdateFileEventLog(fileID, "error", correlationID, "ingest", `{"error" : "Decryption failed with all available key(s)"}`, string(m)); err != nil { + if err := app.DB.UpdateFileEventLog(fileID, "error", "ingest", `{"error" : "Decryption failed with all available key(s)"}`, string(m)); err != nil { log.Errorf("Failed to set ingestion status for file from message, file-id: %s, reason: %s", fileID, err.Error()) } @@ -455,7 +436,7 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig OriginalMessage: message, } body, _ := json.Marshal(fileError) - if err := app.MQ.SendMessage(correlationID, app.Conf.Broker.Exchange, "error", body); err != nil { + if err := app.MQ.SendMessage(fileID, app.Conf.Broker.Exchange, "error", body); err != nil { log.Errorf("failed to publish message, reason: %v", err) } @@ -535,7 +516,7 @@ 
func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig log.Debugf("Wrote archived file (file-id: %s, user: %s, filepath: %s, archivepath: %s, archivedsize: %d)", fileID, message.User, message.FilePath, fileID, fileInfo.Size) - status, err = app.DB.GetFileStatus(correlationID) + status, err = app.DB.GetFileStatus(fileID) if err != nil { log.Errorf("failed to get file status, file-id: %s, reason: (%s)", fileID, err.Error()) @@ -554,7 +535,7 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig return "nack" } - if err := app.DB.UpdateFileEventLog(fileID, "archived", correlationID, "ingest", "{}", string(m)); err != nil { + if err := app.DB.UpdateFileEventLog(fileID, "archived", "ingest", "{}", string(m)); err != nil { log.Errorf("failed to set event log status for file, file-id: %s, reason: %s", fileID, err.Error()) return "nack" @@ -580,7 +561,7 @@ func (app *Ingest) ingestFile(correlationID string, message schema.IngestionTrig return "nack" } - if err := app.MQ.SendMessage(correlationID, app.Conf.Broker.Exchange, app.Conf.Broker.RoutingKey, archivedMsg); err != nil { + if err := app.MQ.SendMessage(fileID, app.Conf.Broker.Exchange, app.Conf.Broker.RoutingKey, archivedMsg); err != nil { // TODO fix resend mechanism log.Errorf("failed to publish message, reason: %v", err) diff --git a/sda/cmd/ingest/ingest_test.go b/sda/cmd/ingest/ingest_test.go index ead55be34..1ac59eacb 100644 --- a/sda/cmd/ingest/ingest_test.go +++ b/sda/cmd/ingest/ingest_test.go @@ -133,7 +133,7 @@ func TestMain(m *testing.M) { if err != nil || res.StatusCode != 200 { return err } - res.Body.Close() + _ = res.Body.Close() return nil }); err != nil { @@ -385,11 +385,10 @@ func (ts *TestSuite) TestCancelFile() { // prepare the DB entries userName := "test-cancel" file1 := fmt.Sprintf("/%v/TestCancelMessage.c4gh", userName) - fileID, err := ts.ingest.DB.RegisterFile(file1, userName) + fileID, err := ts.ingest.DB.RegisterFile(nil, file1, 
userName) assert.NoError(ts.T(), err, "failed to register file in database") - corrID := uuid.New().String() - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", corrID, userName, "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", userName, "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } @@ -399,17 +398,16 @@ func (ts *TestSuite) TestCancelFile() { User: userName, } - assert.Equal(ts.T(), "ack", ts.ingest.cancelFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.cancelFile(fileID, message)) } func (ts *TestSuite) TestCancelFile_wrongCorrelationID() { // prepare the DB entries userName := "test-cancel" file1 := fmt.Sprintf("/%v/TestCancelMessage_wrongCorrelationID.c4gh", userName) - fileID, err := ts.ingest.DB.RegisterFile(file1, userName) + fileID, err := ts.ingest.DB.RegisterFile(nil, file1, userName) assert.NoError(ts.T(), err, "failed to register file in database") - corrID := uuid.New().String() - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", corrID, userName, "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", userName, "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } @@ -419,17 +417,16 @@ func (ts *TestSuite) TestCancelFile_wrongCorrelationID() { User: userName, } - assert.Equal(ts.T(), "reject", ts.ingest.cancelFile(uuid.New().String(), message)) + assert.Equal(ts.T(), "reject", ts.ingest.cancelFile(uuid.NewString(), message)) } // messages of type `ingest` func (ts *TestSuite) TestIngestFile() { // prepare the DB entries - fileID, err := ts.ingest.DB.RegisterFile(ts.filePath, ts.UserName) + fileID, err := ts.ingest.DB.RegisterFile(nil, ts.filePath, ts.UserName) assert.NoError(ts.T(), err, "failed to register file in database") - corrID := uuid.New().String() - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", corrID, ts.UserName, "{}", "{}"); err != nil { + if err = 
ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", ts.UserName, "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } @@ -439,15 +436,14 @@ func (ts *TestSuite) TestIngestFile() { User: ts.UserName, } - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) } func (ts *TestSuite) TestIngestFile_secondTime() { // prepare the DB entries - fileID, err := ts.ingest.DB.RegisterFile(ts.filePath, ts.UserName) + fileID, err := ts.ingest.DB.RegisterFile(nil, ts.filePath, ts.UserName) assert.NoError(ts.T(), err, "failed to register file in database") - corrID := uuid.New().String() - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", corrID, ts.UserName, "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", ts.UserName, "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } @@ -457,10 +453,10 @@ func (ts *TestSuite) TestIngestFile_secondTime() { User: ts.UserName, } - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) // file is already in `archived` state - assert.Equal(ts.T(), "reject", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "reject", ts.ingest.ingestFile(fileID, message)) } func (ts *TestSuite) TestIngestFile_unknownInboxType() { message := schema.IngestionTrigger{ @@ -473,11 +469,10 @@ func (ts *TestSuite) TestIngestFile_unknownInboxType() { } func (ts *TestSuite) TestIngestFile_reingestCancelledFile() { // prepare the DB entries - fileID, err := ts.ingest.DB.RegisterFile(ts.filePath, ts.UserName) + fileID, err := ts.ingest.DB.RegisterFile(nil, ts.filePath, ts.UserName) assert.NoError(ts.T(), err, "failed to register file in database") - corrID := uuid.New().String() - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", corrID, ts.UserName, "{}", "{}"); err != nil { + if err = 
ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", ts.UserName, "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } @@ -487,21 +482,20 @@ func (ts *TestSuite) TestIngestFile_reingestCancelledFile() { User: ts.UserName, } - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "disabled", corrID, "ingest", "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "disabled", "ingest", "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) } func (ts *TestSuite) TestIngestFile_reingestCancelledFileNewChecksum() { // prepare the DB entries - fileID, err := ts.ingest.DB.RegisterFile(ts.filePath, ts.UserName) + fileID, err := ts.ingest.DB.RegisterFile(nil, ts.filePath, ts.UserName) assert.NoError(ts.T(), err, "failed to register file in database") - corrID := uuid.New().String() - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", corrID, ts.UserName, "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", ts.UserName, "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } @@ -511,9 +505,9 @@ func (ts *TestSuite) TestIngestFile_reingestCancelledFileNewChecksum() { User: ts.UserName, } - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "disabled", corrID, "ingest", "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "disabled", "ingest", "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } @@ -555,7 +549,7 @@ func (ts *TestSuite) TestIngestFile_reingestCancelledFileNewChecksum() { crypt4GHWriter.Close() // 
reingestion should work - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) // DB should have the new checksum var dbChecksum string @@ -568,11 +562,10 @@ func (ts *TestSuite) TestIngestFile_reingestCancelledFileNewChecksum() { } func (ts *TestSuite) TestIngestFile_reingestVerifiedFile() { // prepare the DB entries - fileID, err := ts.ingest.DB.RegisterFile(ts.filePath, ts.UserName) + fileID, err := ts.ingest.DB.RegisterFile(nil, ts.filePath, ts.UserName) assert.NoError(ts.T(), err, "failed to register file in database") - corrID := uuid.New().String() - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", corrID, ts.UserName, "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", ts.UserName, "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } @@ -582,7 +575,7 @@ func (ts *TestSuite) TestIngestFile_reingestVerifiedFile() { User: ts.UserName, } - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) // fake file verification sha256hash := sha256.New() @@ -595,15 +588,14 @@ func (ts *TestSuite) TestIngestFile_reingestVerifiedFile() { ts.Fail("failed to mark file as verified") } - assert.Equal(ts.T(), "reject", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "reject", ts.ingest.ingestFile(fileID, message)) } func (ts *TestSuite) TestIngestFile_reingestVerifiedCancelledFile() { // prepare the DB entries - fileID, err := ts.ingest.DB.RegisterFile(ts.filePath, ts.UserName) + fileID, err := ts.ingest.DB.RegisterFile(nil, ts.filePath, ts.UserName) assert.NoError(ts.T(), err, "failed to register file in database") - corrID := uuid.New().String() - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", corrID, ts.UserName, "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", ts.UserName, "{}", 
"{}"); err != nil { ts.Fail("failed to update file event log") } @@ -613,7 +605,7 @@ func (ts *TestSuite) TestIngestFile_reingestVerifiedCancelledFile() { User: ts.UserName, } - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) // fake file verification sha256hash := sha256.New() @@ -626,19 +618,18 @@ func (ts *TestSuite) TestIngestFile_reingestVerifiedCancelledFile() { ts.Fail("failed to mark file as verified") } - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "disabled", corrID, "ingest", "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "disabled", "ingest", "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) } func (ts *TestSuite) TestIngestFile_reingestVerifiedCancelledFileNewChecksum() { // prepare the DB entries - fileID, err := ts.ingest.DB.RegisterFile(ts.filePath, ts.UserName) + fileID, err := ts.ingest.DB.RegisterFile(nil, ts.filePath, ts.UserName) assert.NoError(ts.T(), err, "failed to register file in database") - corrID := uuid.New().String() - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", corrID, ts.UserName, "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "uploaded", ts.UserName, "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } @@ -648,7 +639,7 @@ func (ts *TestSuite) TestIngestFile_reingestVerifiedCancelledFileNewChecksum() { User: ts.UserName, } - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) var firstDbChecksum string const q1 = "SELECT checksum from sda.checksums WHERE source = 'UPLOADED' and file_id = $1;" @@ -667,7 +658,7 @@ func (ts *TestSuite) TestIngestFile_reingestVerifiedCancelledFileNewChecksum() { ts.Fail("failed to 
mark file as verified") } - if err = ts.ingest.DB.UpdateFileEventLog(fileID, "disabled", corrID, "ingest", "{}", "{}"); err != nil { + if err = ts.ingest.DB.UpdateFileEventLog(fileID, "disabled", "ingest", "{}", "{}"); err != nil { ts.Fail("failed to update file event log") } @@ -709,7 +700,7 @@ func (ts *TestSuite) TestIngestFile_reingestVerifiedCancelledFileNewChecksum() { crypt4GHWriter.Close() // reingestion should work - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(fileID, message)) // DB should have the new checksum var dbChecksum string @@ -724,16 +715,18 @@ func (ts *TestSuite) TestIngestFile_reingestVerifiedCancelledFileNewChecksum() { } func (ts *TestSuite) TestIngestFile_missingFile() { // prepare the DB entries - corrID := uuid.New().String() + basepath := filepath.Dir(ts.filePath) + newFileID := uuid.NewString() + message := schema.IngestionTrigger{ Type: "ingest", FilePath: fmt.Sprintf("%s/missing.file.c4gh", basepath), User: ts.UserName, } - assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(corrID, message)) + assert.Equal(ts.T(), "ack", ts.ingest.ingestFile(newFileID, message)) } func (ts *TestSuite) TestDetectMisingC4GHKeys() { viper.Set("c4gh.privateKeys", "") diff --git a/sda/cmd/intercept/intercept.go b/sda/cmd/intercept/intercept.go index 3aa44f410..5cd022a29 100644 --- a/sda/cmd/intercept/intercept.go +++ b/sda/cmd/intercept/intercept.go @@ -102,7 +102,7 @@ func main() { continue } - log.Infof("Routing message (corr-id: %s, routingkey: %s)", delivered.CorrelationId, routingKey) + log.Infof("Routing message (correlation-id: %s, routingkey: %s)", delivered.CorrelationId, routingKey) if err := mq.SendMessage(delivered.CorrelationId, conf.Broker.Exchange, routingKey, delivered.Body); err != nil { log.Errorf("failed to publish message, reason: (%v)", err) } diff --git a/sda/cmd/mapper/mapper.go b/sda/cmd/mapper/mapper.go index ee58fc8cc..be6ec6eb5 100644 --- 
a/sda/cmd/mapper/mapper.go +++ b/sda/cmd/mapper/mapper.go @@ -104,7 +104,7 @@ func main() { } for _, aID := range mappings.AccessionIDs { - log.Debugf("Mapped file to dataset (corr-id: %s, datasetid: %s, accessionid: %s)", delivered.CorrelationId, mappings.DatasetID, aID) + log.Debugf("Mapped file to dataset (correlation-id: %s, datasetid: %s, accessionid: %s)", delivered.CorrelationId, mappings.DatasetID, aID) fileInfo, err := db.GetFileInfoFromAccessionID(aID) if err != nil { log.Errorf("failed to get file info for file with stable ID: %s", aID) diff --git a/sda/cmd/notify/notify_test.go b/sda/cmd/notify/notify_test.go index 2bb29d1b0..44242491c 100644 --- a/sda/cmd/notify/notify_test.go +++ b/sda/cmd/notify/notify_test.go @@ -124,7 +124,7 @@ func TestSendEmail(t *testing.T) { }) if err := server.Start(); err != nil { - fmt.Println(err) + _, _ = fmt.Println(err) } hostAddress, portNumber := "127.0.0.1", server.PortNumber diff --git a/sda/cmd/orchestrate/orchestrate.go b/sda/cmd/orchestrate/orchestrate.go index 457ac838a..53b8d2fdb 100644 --- a/sda/cmd/orchestrate/orchestrate.go +++ b/sda/cmd/orchestrate/orchestrate.go @@ -381,7 +381,7 @@ func validateMsg(delivered *amqp091.Delivery, mq *broker.AMQPBroker, routingKey return err } - log.Debugf("Routing message (corr-id: %s, routingkey: %s, message: %s)", delivered.CorrelationId, routingKey, publishMsg) + log.Debugf("Routing message (correlation-id: %s, routingkey: %s, message: %s)", delivered.CorrelationId, routingKey, publishMsg) if err := mq.SendMessage(delivered.CorrelationId, mq.Conf.Exchange, routingKey, publishMsg); err != nil { // TODO fix resend mechanism diff --git a/sda/cmd/reencrypt/reencrypt.go b/sda/cmd/reencrypt/reencrypt.go index 0f954c3fb..e3ade4da8 100644 --- a/sda/cmd/reencrypt/reencrypt.go +++ b/sda/cmd/reencrypt/reencrypt.go @@ -97,7 +97,7 @@ func (s *server) ReencryptHeader(_ context.Context, in *re.ReencryptRequest) (*r } } - return nil, status.Error(400, err.Error()) + return nil, 
status.Error(400, "header reencryption failed, no matching key available") } // Check implements the healthgrpc.HealthServer Check method for the proxy grpc Health server. diff --git a/sda/cmd/reencrypt/reencrypt_test.go b/sda/cmd/reencrypt/reencrypt_test.go index 779217454..55da6ebe1 100644 --- a/sda/cmd/reencrypt/reencrypt_test.go +++ b/sda/cmd/reencrypt/reencrypt_test.go @@ -86,7 +86,7 @@ func (ts *ReEncryptTests) SetupTest() { } func (ts *ReEncryptTests) TearDownTest() { - os.RemoveAll(ts.KeyPath) + _ = os.RemoveAll(ts.KeyPath) } func (ts *ReEncryptTests) TestReencryptHeader() { @@ -342,3 +342,182 @@ func (ts *ReEncryptTests) TestReencryptHeader_TLS() { assert.NoError(ts.T(), err) assert.Equal(ts.T(), "content", string(data)) } + +func (ts *ReEncryptTests) TestCallReencryptHeader() { + lis, err := net.Listen("tcp", "localhost:50061") + if err != nil { + ts.T().FailNow() + } + + go func() { + var opts []grpc.ServerOption + s := grpc.NewServer(opts...) + re.RegisterReencryptServer(s, &server{c4ghPrivateKeyList: ts.PrivateKeyList}) + if err := s.Serve(lis); err != nil { + ts.T().Fail() + } + }() + + grpcConf := config.Grpc{ + Host: "localhost", + Port: 50061, + Timeout: 30, + } + res, err := re.CallReencryptHeader(ts.FileHeader, ts.UserPubKeyString, grpcConf) + assert.NoError(ts.T(), err) + + assert.Equal(ts.T(), "crypt4gh", string(res[:8])) + + hr := bytes.NewReader(res) + fileStream := io.MultiReader(hr, bytes.NewReader(ts.FileData)) + + c4gh, err := streaming.NewCrypt4GHReader(fileStream, ts.UserPrivateKey, nil) + assert.NoError(ts.T(), err) + + data, err := io.ReadAll(c4gh) + assert.NoError(ts.T(), err) + assert.Equal(ts.T(), "content", string(data)) +} + +func (ts *ReEncryptTests) TestCallReencryptHeaderTLS() { + certPath := ts.T().TempDir() + helper.MakeCerts(certPath) + rootCAs := x509.NewCertPool() + cacertFile, err := os.ReadFile(certPath + "/ca.crt") + if err != nil { + ts.T().FailNow() + } + ok := rootCAs.AppendCertsFromPEM(cacertFile) + if !ok { + 
ts.T().FailNow() + } + certs, err := tls.LoadX509KeyPair(certPath+"/tls.crt", certPath+"/tls.key") + if err != nil { + ts.T().Log(err.Error()) + ts.T().FailNow() + } + + lis, err := net.Listen("tcp", "localhost:50062") + if err != nil { + ts.T().FailNow() + } + + go func() { + serverCreds := credentials.NewTLS( + &tls.Config{ + Certificates: []tls.Certificate{certs}, + ClientAuth: tls.RequireAndVerifyClientCert, + MinVersion: tls.VersionTLS13, + ClientCAs: rootCAs, + }, + ) + opts := []grpc.ServerOption{grpc.Creds(serverCreds)} + s := grpc.NewServer(opts...) + re.RegisterReencryptServer(s, &server{c4ghPrivateKeyList: ts.PrivateKeyList}) + if err := s.Serve(lis); err != nil { + ts.T().Fail() + } + }() + + clientCreds := credentials.NewTLS( + &tls.Config{ + Certificates: []tls.Certificate{certs}, + MinVersion: tls.VersionTLS13, + RootCAs: rootCAs, + }, + ) + + grpcConf := config.Grpc{ + ClientCreds: clientCreds, + Host: "localhost", + Port: 50062, + Timeout: 30, + } + res, err := re.CallReencryptHeader(ts.FileHeader, ts.UserPubKeyString, grpcConf) + assert.NoError(ts.T(), err) + + assert.Equal(ts.T(), "crypt4gh", string(res[:8])) + + hr := bytes.NewReader(res) + fileStream := io.MultiReader(hr, bytes.NewReader(ts.FileData)) + + c4gh, err := streaming.NewCrypt4GHReader(fileStream, ts.UserPrivateKey, nil) + assert.NoError(ts.T(), err) + + data, err := io.ReadAll(c4gh) + assert.NoError(ts.T(), err) + assert.Equal(ts.T(), "content", string(data)) +} + +func (ts *ReEncryptTests) TestCallReencryptHeader_ConnectionError() { + grpcConf := config.Grpc{ + Host: "locahost", + Port: 50063, + Timeout: 30, + } + _, err := re.CallReencryptHeader(ts.FileHeader, ts.UserPubKeyString, grpcConf) + assert.Error(ts.T(), err, "expected a connection error") +} + +func (ts *ReEncryptTests) TestCallReencryptHeader_BadInput() { + lis, err := net.Listen("tcp", "localhost:50064") + if err != nil { + ts.T().FailNow() + } + + go func() { + var opts []grpc.ServerOption + s := 
grpc.NewServer(opts...) + re.RegisterReencryptServer(s, &server{c4ghPrivateKeyList: ts.PrivateKeyList}) + if err := s.Serve(lis); err != nil { + ts.T().Fail() + } + }() + + grpcConf := config.Grpc{ + Host: "localhost", + Port: 50064, + Timeout: 30, + } + + res, err := re.CallReencryptHeader(ts.FileHeader, "somekey", grpcConf) + assert.ErrorContains(ts.T(), err, "illegal base64 data") + assert.Nil(ts.T(), res) +} + +func (ts *ReEncryptTests) TestReencryptHeader_NoMatchingKey() { + lis, err := net.Listen("tcp", "localhost:50065") + if err != nil { + ts.T().FailNow() + } + + var keyList []*[32]byte + _, testKey, err := keys.GenerateKeyPair() + if err != nil { + ts.T().FailNow() + } + keyList = append(keyList, (&testKey)) + + go func() { + var opts []grpc.ServerOption + s := grpc.NewServer(opts...) + re.RegisterReencryptServer(s, &server{c4ghPrivateKeyList: keyList}) + _ = s.Serve(lis) + }() + + var opts []grpc.DialOption + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient("localhost:50065", opts...) + if err != nil { + ts.T().FailNow() + } + defer conn.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + c := re.NewReencryptClient(conn) + res, err := c.ReencryptHeader(ctx, &re.ReencryptRequest{Oldheader: ts.FileHeader, Publickey: ts.UserPubKeyString}) + assert.Contains(ts.T(), err.Error(), "reencryption failed, no matching key available") + assert.Nil(ts.T(), res) +} diff --git a/sda/cmd/rotatekey/rotatekey.go b/sda/cmd/rotatekey/rotatekey.go new file mode 100644 index 000000000..989c92e4c --- /dev/null +++ b/sda/cmd/rotatekey/rotatekey.go @@ -0,0 +1,281 @@ +// The rotatekey service accepts messages to re-encrypt a file identified by its fileID. +// The service re-encrypts the file header with a configured public key and stores it +// in the database together with the key-hash of the rotation key. 
+// It then sends a message to verify so that the file is re-verified. + +package main + +import ( + "bytes" + "encoding/base64" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "os/signal" + "strings" + "syscall" + + "github.com/neicnordic/crypt4gh/keys" + "github.com/neicnordic/sensitive-data-archive/internal/broker" + "github.com/neicnordic/sensitive-data-archive/internal/config" + "github.com/neicnordic/sensitive-data-archive/internal/database" + "github.com/neicnordic/sensitive-data-archive/internal/reencrypt" + "github.com/neicnordic/sensitive-data-archive/internal/schema" + log "github.com/sirupsen/logrus" +) + +type RotateKey struct { + Conf *config.Config + MQ *broker.AMQPBroker + DB *database.SDAdb + PubKeyEncoded string +} + +func main() { + app := RotateKey{} + var err error + + sigc := make(chan os.Signal, 5) + signal.Notify(sigc, os.Interrupt, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + + // Create a function to handle panic and exit gracefully + defer func() { + if err := recover(); err != nil { + if app.MQ != nil { + defer app.MQ.Channel.Close() + defer app.MQ.Connection.Close() + } + if app.DB != nil { + defer app.DB.Close() + } + log.Fatal(err) + } + }() + + forever := make(chan bool) + + app.Conf, err = config.NewConfig("rotatekey") + if err != nil { + panic(err) + } + app.MQ, err = broker.NewMQ(app.Conf.Broker) + if err != nil { + panic(err) + } + app.DB, err = database.NewSDAdb(app.Conf.Database) + if err != nil { + panic(err) + } + + go func() { + <-sigc // blocks here until it receives from sigc + _, _ = fmt.Println("Interrupt signal received. 
Shutting down.") + defer app.MQ.Channel.Close() + defer app.MQ.Connection.Close() + defer app.DB.Close() + + os.Exit(0) // exit program + }() + + // encode pubkey as pem and then as base64 string + tmp := &bytes.Buffer{} + if err := keys.WriteCrypt4GHX25519PublicKey(tmp, *app.Conf.RotateKey.PublicKey); err != nil { + panic(err) + } + app.PubKeyEncoded = base64.StdEncoding.EncodeToString(tmp.Bytes()) + + // Check that key is registered in the db at startup + err = app.DB.CheckKeyHash(hex.EncodeToString(app.Conf.RotateKey.PublicKey[:])) + if err != nil { + panic(fmt.Errorf("database lookup of the rotation key failed, reason: %v", err)) + } + + go func() { + connError := app.MQ.ConnectionWatcher() + log.Error(connError) + forever <- false + }() + + go func() { + connError := app.MQ.ChannelWatcher() + log.Error(connError) + forever <- false + }() + + log.Info("Starting rotatekey service") + var message schema.KeyRotation + + go func() { + // Create a function to handle panic and exit gracefully + defer func() { + if err := recover(); err != nil { + if app.MQ != nil { + defer app.MQ.Channel.Close() + defer app.MQ.Connection.Close() + } + if app.DB != nil { + defer app.DB.Close() + } + log.Fatal(err) + } + }() + messages, err := app.MQ.GetMessages(app.Conf.Broker.Queue) + if err != nil { + panic(err) + } + for delivered := range messages { + log.Debugf("Received a message (correlation-id: %s, message: %s)", + delivered.CorrelationId, + delivered.Body) + + err := schema.ValidateJSON(fmt.Sprintf("%s/rotate-key.json", app.Conf.Broker.SchemasPath), delivered.Body) + if err != nil { + msg := "validation of incoming message (rotate-key) failed" + log.Errorf("%s, reason: %v", msg, err) + // Ack message and send the payload to an error queue so it can be analyzed. 
+ infoErrorMessage := broker.InfoError{ + Error: msg, + Reason: err.Error(), + OriginalMessage: string(delivered.Body), + } + body, _ := json.Marshal(infoErrorMessage) + if err := app.MQ.SendMessage(delivered.CorrelationId, app.Conf.Broker.Exchange, "error", body); err != nil { + log.Errorf("failed to publish message, reason: (%s)", err.Error()) + } + if err := delivered.Ack(false); err != nil { + log.Errorf("failed to Ack message, reason: (%s)", err.Error()) + } + + continue + } + + // Fetch rotate key hash before starting work so that we make sure the hash state + // has not changed since the application startup. + keyhash := hex.EncodeToString(app.Conf.RotateKey.PublicKey[:]) + // exit app if target key was modified after app start-up, e.g. if key has been deprecated + if err = app.DB.CheckKeyHash(keyhash); err != nil { + panic(fmt.Errorf("check of target key failed, reason: %v", err)) + } + + // we unmarshal the message in the validation step so this is safe to do + _ = json.Unmarshal(delivered.Body, &message) + + ackNack, msg, err := app.reEncryptHeader(message.FileID) + + switch ackNack { + case "ack": + if err := delivered.Ack(false); err != nil { + log.Errorf("failed to ack message, reason: %v", err) + } + case "ackSendToError": + infoErrorMessage := broker.InfoError{ + Error: msg, + Reason: err.Error(), + OriginalMessage: string(delivered.Body), + } + body, _ := json.Marshal(infoErrorMessage) + if err := app.MQ.SendMessage(delivered.CorrelationId, app.Conf.Broker.Exchange, "error", body); err != nil { + log.Errorf("failed to publish message, reason: (%s)", err.Error()) + } + if err := delivered.Ack(false); err != nil { + log.Errorf("failed to Ack message, reason: (%s)", err.Error()) + } + case "nackRequeue": + if err := delivered.Nack(false, true); err != nil { + log.Errorf("failed to Nack message, reason: %v", err) + } + default: + // will catch `reject`s, failures that should not be requeued. 
+ if err := delivered.Reject(false); err != nil { + log.Errorf("failed to reject message, reason: %v", err) + } + } + } + }() + + <-forever +} + +func (app *RotateKey) reEncryptHeader(fileID string) (ackNack, msg string, err error) { + // Get current keyhash for the file, send to error queue if this fails + oldKeyHash, err := app.DB.GetKeyHash(fileID) + if err != nil { + msg := fmt.Sprintf("failed to get keyhash for file with file-id: %s", fileID) + log.Errorf("%s, reason: %v", msg, err) + + switch { + case strings.Contains(err.Error(), "sql: no rows in result set"): + return "ackSendToError", msg, err + default: + return "nackRequeue", msg, err + } + } + + // Check that the file is not already encrypted with the target key + keyhash := hex.EncodeToString(app.Conf.RotateKey.PublicKey[:]) + if oldKeyHash == keyhash { + log.Infof("the file with file-id: %s is already encrypted with the given rotation c4gh key", fileID) + + return "ack", "", nil + } + + // reencrypt header + log.Debugf("rotating c4gh key for file with file-id: %s", fileID) + + header, err := app.DB.GetHeader(fileID) + if err != nil { + msg := fmt.Sprintf("GetHeader failed for file-id: %s", fileID) + log.Errorf("%s, reason: %v", msg, err) + + switch { + case strings.Contains(err.Error(), "sql: no rows in result set"): + return "ackSendToError", msg, err + default: + return "nackRequeue", msg, err + } + } + + newHeader, err := reencrypt.CallReencryptHeader(header, app.PubKeyEncoded, app.Conf.RotateKey.Grpc) + if err != nil { + msg := fmt.Sprintf("failed to rotate c4gh key for file %s", fileID) + log.Errorf("%s, reason: %v", msg, err) + + return "ackSendToError", msg, err + } + + // Rotate header and keyhash in database + if err := app.DB.RotateHeaderKey(newHeader, keyhash, fileID); err != nil { + msg := fmt.Sprintf("RotateHeaderKey failed for file-id: %s", fileID) + log.Errorf("%s, reason: %v", msg, err) + + return "nackRequeue", msg, err + } + + // Send re-verify message + reVerify, err := 
app.DB.GetReVerificationDataFromFileID(fileID) + if err != nil { + msg := fmt.Sprintf("GetReVerificationData failed for file-id %s", fileID) + log.Errorf("%s, reason: %v", msg, err) + + return "ackSendToError", msg, err + } + + reVerifyMsg, _ := json.Marshal(&reVerify) + err = schema.ValidateJSON(fmt.Sprintf("%s/ingestion-verification.json", app.Conf.Broker.SchemasPath), reVerifyMsg) + if err != nil { + msg := "Validation of outgoing re-verify message failed" + log.Errorf("%s, reason: %v", msg, err) + + return "ackSendToError", msg, err + } + + if err := app.MQ.SendMessage(fileID, app.Conf.Broker.Exchange, "archived", reVerifyMsg); err != nil { + msg := "failed to publish message" + log.Errorf("%s, reason: %v", msg, err) + + return "ackSendToError", msg, err + } + + return "ack", "", nil +} diff --git a/sda/cmd/rotatekey/rotatekey.md b/sda/cmd/rotatekey/rotatekey.md new file mode 100644 index 000000000..007d6a4aa --- /dev/null +++ b/sda/cmd/rotatekey/rotatekey.md @@ -0,0 +1,111 @@ +# rotatekey Service + +Rotates the crypt4gh encryption key of ingested file headers. + +## Service Description + +The `rotatekey` service re-encrypts the header of a file with the configured target key, and updates the database with the new header and encryption key hash. + +When running, rotatekey reads messages from the `rotatekey` RabbitMQ queue. +For each message, these steps are taken: + +1. The message is validated as valid JSON that matches the "rotate-key" schema. +2. A database look-up is performed for the configured target public key hash. If the look-up fails or the key has been deprecated, the service will exit. +3. The key hash of the c4gh key with which the file is currently encrypted is fetched from the database and compared with the configured target key. +4. If these key hashes differ, the reencrypt service is called to re-encrypt the file header with the target key. +5. The file header entry in the database is updated with the new one. +6. 
The key hash entry in the database is updated with the new one (target key). +7. A re-verify message is compiled, validated and sent to the archived queue so that it is consumed by the `verify` service. +8. The message is Ack'ed. + +In case of any errors during the above process, progress will be halted the message is Nack'ed, an info-error message is sent and the service moves on to the next message. + +## Communication + +- Rotatekey reads messages from one rabbitmq queue (`rotatekey`). +- Rotatekey reads file information, headers and key hashes from the database and can not be started without a database connection. +- Rotatekey makes grpc calls to `reencrypt` service for re-encrypting the header with the target public key. +- Rotatekey sends messages to the `archived` queue for consumption by the `verify` service. + +## Configuration + +There are a number of options that can be set for the rotatekey service. +These settings can be set by mounting a yaml-file at `/config.yaml` with settings. + +ex. + +```yaml +log: + level: "debug" + format: "json" +``` + +They may also be set using environment variables like: + +```bash +export LOG_LEVEL="debug" +export LOG_FORMAT="json" +``` + +### Public Key file settings + +This setting controls which crypt4gh keyfile is loaded. + +- `C4GH_ROTATEPUBKEYPATH`: path to the crypt4gh public key to use for reencrypting file headers. + +### RabbitMQ broker settings + +These settings control how sync connects to the RabbitMQ message broker. 
+ +- `BROKER_HOST`: hostname of the rabbitmq server +- `BROKER_PORT`: rabbitmq broker port (commonly `5671` with TLS and `5672` without) +- `BROKER_QUEUE`: message queue or stream to read messages from (commonly `rotatekey_stream`) +- `BROKER_USER`: username to connect to rabbitmq +- `BROKER_PASSWORD`: password to connect to rabbitmq +- `BROKER_ROUTINGKEY`: routing from a rabbitmq exchange to the rotatekey queue + +### PostgreSQL Database settings + +- `DB_HOST`: hostname for the postgresql database +- `DB_PORT`: database port (commonly 5432) +- `DB_USER`: username for the database +- `DB_PASSWORD`: password for the database +- `DB_DATABASE`: database name +- `DB_SSLMODE`: The TLS encryption policy to use for database connections. Valid options are: + - `disable` + - `allow` + - `prefer` + - `require` + - `verify-ca` + - `verify-full` + + More information is available + [in the postgresql documentation](https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-PROTECTION) + + Note that if `DB_SSLMODE` is set to anything but `disable`, then `DB_CACERT` needs to be set, + and if set to `verify-full`, then `DB_CLIENTCERT`, and `DB_CLIENTKEY` must also be set. + +- `DB_CLIENTKEY`: key file for the database client certificate +- `DB_CLIENTCERT`: database client certificate file +- `DB_CACERT`: Certificate Authority (CA) certificate for the database to use + +### GRPC settings + +- `GRPC_HOST`: Host name of the grpc server +- `GRPC_PORT`: Port number of the grpc server +- `GRPC_CACERT`: Certificate Authority (CA) certificate for validating incoming request +- `GRPC_SERVERCERT`: path to the x509 certificate used by the service +- `GRPC_SERVERKEY`: path to the x509 private key used by the service + + +### Logging settings + +- `LOG_FORMAT` can be set to “json” to get logs in json format. 
All other values result in text logging +- `LOG_LEVEL` can be set to one of the following, in increasing order of severity: + - `trace` + - `debug` + - `info` + - `warn` (or `warning`) + - `error` + - `fatal` + - `panic` diff --git a/sda/cmd/rotatekey/rotatekey_test.go b/sda/cmd/rotatekey/rotatekey_test.go new file mode 100644 index 000000000..4de3af6bc --- /dev/null +++ b/sda/cmd/rotatekey/rotatekey_test.go @@ -0,0 +1,321 @@ +package main + +import ( + "context" + "database/sql" + "encoding/hex" + "errors" + "fmt" + "net" + "net/http" + "os" + "path" + "runtime" + "strconv" + "testing" + "time" + + "github.com/google/uuid" + "github.com/neicnordic/crypt4gh/keys" + "github.com/neicnordic/sensitive-data-archive/internal/broker" + "github.com/neicnordic/sensitive-data-archive/internal/config" + "github.com/neicnordic/sensitive-data-archive/internal/database" + re "github.com/neicnordic/sensitive-data-archive/internal/reencrypt" + "github.com/ory/dockertest" + "github.com/ory/dockertest/docker" + log "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +var dbPort, mqPort int + +func TestMain(m *testing.M) { + if _, err := os.Stat("/.dockerenv"); err == nil { + m.Run() + } + _, b, _, _ := runtime.Caller(0) + rootDir := path.Join(path.Dir(b), "../../../") + + // uses a sensible default on windows (tcp/http) and linux/osx (socket) + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not construct pool: %s", err) + } + + // uses pool to try to connect to Docker + err = pool.Client.Ping() + if err != nil { + log.Fatalf("Could not connect to Docker: %s", err) + } + + // pulls an image, creates a container based on it and runs it + postgres, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "15.4-alpine3.17", + Env: []string{ + "POSTGRES_PASSWORD=rootpasswd", + "POSTGRES_DB=sda", + }, + Mounts: 
[]string{ + fmt.Sprintf("%s/postgresql/initdb.d:/docker-entrypoint-initdb.d", rootDir), + }, + }, func(config *docker.HostConfig) { + // set AutoRemove to true so that stopped container goes away by itself + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{ + Name: "no", + } + }) + if err != nil { + log.Fatalf("Could not start resource: %s", err) + } + + dbHostAndPort := postgres.GetHostPort("5432/tcp") + dbPort, _ = strconv.Atoi(postgres.GetPort("5432/tcp")) + databaseURL := fmt.Sprintf("postgres://postgres:rootpasswd@%s/sda?sslmode=disable", dbHostAndPort) + + pool.MaxWait = 120 * time.Second + if err = pool.Retry(func() error { + db, err := sql.Open("postgres", databaseURL) + if err != nil { + log.Println(err) + + return err + } + + query := "SELECT MAX(version) FROM sda.dbschema_version;" + var dbVersion int + + return db.QueryRow(query).Scan(&dbVersion) + }); err != nil { + log.Fatalf("Could not connect to postgres: %s", err) + } + + // pulls an image, creates a container based on it and runs it + rabbitmq, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "ghcr.io/neicnordic/sensitive-data-archive", + Tag: "v3.0.0-rabbitmq", + }, func(config *docker.HostConfig) { + // set AutoRemove to true so that stopped container goes away by itself + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{ + Name: "no", + } + }) + if err != nil { + if err := pool.Purge(postgres); err != nil { + log.Fatalf("Could not purge resource: %s", err) + } + log.Fatalf("Could not start resource: %s", err) + } + + mqPort, _ = strconv.Atoi(rabbitmq.GetPort("5672/tcp")) + brokerAPI := rabbitmq.GetHostPort("15672/tcp") + + client := http.Client{Timeout: 30 * time.Second} + req, err := http.NewRequest(http.MethodGet, "http://"+brokerAPI+"/api/queues/sda/", http.NoBody) + if err != nil { + log.Fatal(err) + } + req.SetBasicAuth("guest", "guest") + + // exponential backoff-retry, because the application in the container might not be ready 
to accept connections yet + if err := pool.Retry(func() error { + res, err := client.Do(req) + if err != nil || res.StatusCode != 200 { + return err + } + _ = res.Body.Close() + + return nil + }); err != nil { + if err := pool.Purge(postgres); err != nil { + log.Fatalf("Could not purge resource: %s", err) + } + if err := pool.Purge(rabbitmq); err != nil { + log.Fatalf("Could not purge resource: %s", err) + } + log.Fatalf("Could not connect to rabbitmq: %s", err) + } + + log.Println("starting tests") + code := m.Run() + + log.Println("tests completed") + if err := pool.Purge(postgres); err != nil { + log.Fatalf("Could not purge resource: %s", err) + } + if err := pool.Purge(rabbitmq); err != nil { + log.Fatalf("Could not purge resource: %s", err) + } + + os.Exit(code) +} + +type TestSuite struct { + suite.Suite + app RotateKey + fileID string + privateKeyList []*[32]byte +} +type server struct { + re.UnimplementedReencryptServer + c4ghPrivateKeyList []*[32]byte +} + +func TestRotateKeyTestSuite(t *testing.T) { + suite.Run(t, new(TestSuite)) +} + +func (ts *TestSuite) SetupSuite() { + ts.app.Conf = &config.Config{} + ts.app.Conf.Broker.SchemasPath = "../../schemas/isolated" + var err error + ts.app.DB, err = database.NewSDAdb(database.DBConf{ + Host: "localhost", + Port: dbPort, + User: "postgres", + Password: "rootpasswd", + Database: "sda", + SslMode: "disable", + }) + if err != nil { + ts.FailNow("Failed to create DB connection") + } + + ts.app.MQ, err = broker.NewMQ(broker.MQConf{ + Host: "localhost", + Port: mqPort, + User: "guest", + Password: "guest", + Exchange: "sda", + Vhost: "/sda", + }) + if err != nil { + ts.T().Log(err.Error()) + ts.FailNow("Failed to create MQ connection") + } + + publicKey, _, err := keys.GenerateKeyPair() + if err != nil { + ts.FailNow("Failed to create new c4gh keypair") + } + + for i, kh := range []string{"79f2f4dd9cd9435743d5e8ef3d0da55d64437055e89cfa5531395abf8857bd63", hex.EncodeToString(publicKey[:])} { + if err := 
ts.app.DB.AddKeyHash(kh, fmt.Sprintf("key num: %d", i)); err != nil { + ts.FailNow("failed to register a public key") + } + } + + ts.app.Conf.RotateKey.PublicKey = &publicKey + + ts.fileID, err = ts.app.DB.RegisterFile(nil, "rotate-key-test/data.c4gh", "tester_example.org") + if err != nil { + ts.FailNow("Failed to register file in DB") + } + for _, status := range []string{"uploaded", "archived", "verified"} { + if err = ts.app.DB.UpdateFileEventLog(ts.fileID, status, "tester_example.org", "{}", "{}"); err != nil { + ts.FailNow("Failed to set status of file in DB") + } + } + if err := ts.app.DB.SetKeyHash("79f2f4dd9cd9435743d5e8ef3d0da55d64437055e89cfa5531395abf8857bd63", ts.fileID); err != nil { + ts.FailNow("Failed to set key hash of file in DB") + } + if err := ts.app.DB.StoreHeader([]byte("637279707434676801000000010000006c000000000000004f6ae97503ac19b6316cb3330ea4e55e0fa98ed7342afc79deec64606aa33a587e78743695f3be5d5b9d0f386c2b66aefb06de07c506eccec4910455d75f54ce6324b98b4dd35dcc6c0684bbf8a05fb5c2976f540dbbbc95646c2e55ec52c5833115e5659"), ts.fileID); err != nil { + ts.FailNow("Failed to store header of file in DB") + } + + fileInfo := database.FileInfo{ + ArchiveChecksum: "239729e2f471a02f8b43374fa58ea2d3a85ec93874b58696030b4af804c32f36", + DecryptedChecksum: "9aa63cfe45c560c8f16dde4b002a3fe38afa69801df6a6e266b757ab6aace2d8", + DecryptedSize: 34, + Path: ts.fileID, + Size: 59, + } + if err := ts.app.DB.SetVerified(fileInfo, ts.fileID); err != nil { + ts.FailNow("Failed to store header of file in DB") + } + + lis, err := net.Listen("tcp", "localhost:") + if err != nil { + log.Errorf("failed to create listener: %v", err) + ts.T().FailNow() + } + reHost, rePort, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + ts.T().FailNow() + } + go func() { + var opts []grpc.ServerOption + s := grpc.NewServer(opts...) 
+ re.RegisterReencryptServer(s, &server{c4ghPrivateKeyList: ts.privateKeyList}) + reflection.Register(s) + if err := s.Serve(lis); err != nil { + log.Errorf("failed to start GRPC server: %v", err) + ts.T().Fail() + } + }() + + rePortInt, err := strconv.Atoi(rePort) + if err != nil { + ts.T().FailNow() + } + + ts.app.Conf.RotateKey.Grpc = config.Grpc{ + Host: reHost, + Port: rePortInt, + Timeout: 30, + } + + ts.T().Log("suite setup completed") +} + +// ReencryptHeader serves a mock response since we don't need to test the actual reencryption +func (s *server) ReencryptHeader(ctx context.Context, req *re.ReencryptRequest) (*re.ReencryptResponse, error) { + // Mock response based on your needs + if req.Publickey == "phail" { + return &re.ReencryptResponse{}, errors.New("bad error") + } + + mockedResponse := &re.ReencryptResponse{ + Header: []byte("predefined header response"), + } + + return mockedResponse, nil +} + +func (ts *TestSuite) TestReEncryptHeader() { + newFileID := uuid.NewString() + for _, test := range []struct { + expectedError error + expectedMgs string + expectedRes string + fileID string + testName string + }{ + { + testName: "ingested file", + expectedError: nil, + expectedMgs: "", + expectedRes: "ack", + fileID: ts.fileID, + }, + { + testName: "un-ingested file", + expectedError: errors.New("sql: no rows in result set"), + expectedMgs: fmt.Sprintf("failed to get keyhash for file with file-id: %s", newFileID), + expectedRes: "ackSendToError", + fileID: newFileID, + }, + } { + ts.T().Run(test.testName, func(t *testing.T) { + res, msg, err := ts.app.reEncryptHeader(test.fileID) + assert.Equal(t, res, test.expectedRes) + assert.Equal(t, msg, test.expectedMgs) + assert.Equal(t, err, test.expectedError) + }) + } +} diff --git a/sda/cmd/s3inbox/proxy.go b/sda/cmd/s3inbox/proxy.go index 0604ec688..bfe066dc0 100644 --- a/sda/cmd/s3inbox/proxy.go +++ b/sda/cmd/s3inbox/proxy.go @@ -29,6 +29,10 @@ import ( log "github.com/sirupsen/logrus" ) +type uniqueFileID 
struct { + username, filePath string +} + // Proxy represents the toplevel object in this application type Proxy struct { s3 storage.S3Conf @@ -36,7 +40,7 @@ type Proxy struct { messenger *broker.AMQPBroker database *database.SDAdb client *http.Client - fileIDs map[string]string + fileIDs map[uniqueFileID]string } // The Event struct @@ -82,7 +86,7 @@ func NewProxy(s3conf storage.S3Conf, auth userauth.Authenticator, messenger *bro tr := &http.Transport{TLSClientConfig: tlsConf} client := &http.Client{Transport: tr, Timeout: 30 * time.Second} - return &Proxy{s3conf, auth, messenger, db, client, make(map[string]string)} + return &Proxy{s3conf, auth, messenger, db, client, make(map[uniqueFileID]string)} } func (p *Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -160,12 +164,17 @@ func (p *Proxy) allowedResponse(w http.ResponseWriter, r *http.Request, token jw return } + fileIdentifier := uniqueFileID{ + username: username, + filePath: filepath, + } + // if this is an upload request - if p.detectRequestType(r) == Put && p.fileIDs[r.URL.Path] == "" { + if p.detectRequestType(r) == Put && p.fileIDs[fileIdentifier] == "" { // register file in database log.Debugf("registering file %v in the database", r.URL.Path) - p.fileIDs[r.URL.Path], err = p.database.RegisterFile(filepath, username) - log.Debugf("fileId: %v", p.fileIDs[r.URL.Path]) + p.fileIDs[fileIdentifier], err = p.database.RegisterFile(nil, filepath, username) + log.Debugf("fileId: %v", p.fileIDs[fileIdentifier]) if err != nil { p.internalServerError(w, r, fmt.Sprintf("failed to register file in database: %v", err)) @@ -174,7 +183,7 @@ func (p *Proxy) allowedResponse(w http.ResponseWriter, r *http.Request, token jw // check if the file already exists, in that case send an overwrite message, // so that the FEGA portal is informed that a new version of the file exists. 
- err = p.sendMessageOnOverwrite(r, rawFilepath, token) + err = p.sendMessageOnOverwrite(p.fileIDs[fileIdentifier], r, rawFilepath, token) if err != nil { p.internalServerError(w, r, err.Error()) @@ -207,7 +216,7 @@ func (p *Proxy) allowedResponse(w http.ResponseWriter, r *http.Request, token jw return } - err = p.checkAndSendMessage(jsonMessage, r) + err = p.checkAndSendMessage(p.fileIDs[fileIdentifier], jsonMessage, r) if err != nil { p.internalServerError(w, r, fmt.Sprintf("broker error: %v", err)) @@ -216,33 +225,33 @@ func (p *Proxy) allowedResponse(w http.ResponseWriter, r *http.Request, token jw // The following block is for treating the case when the client loses connection to the server and then it reconnects to a // different instance of s3inbox. For more details see #1358. - if p.fileIDs[r.URL.Path] == "" { - p.fileIDs[r.URL.Path], err = p.database.GetFileIDByUserPathAndStatus(username, filepath, "registered") + if p.fileIDs[fileIdentifier] == "" { + p.fileIDs[fileIdentifier], err = p.database.GetFileIDByUserPathAndStatus(username, filepath, "registered") if err != nil { p.internalServerError(w, r, fmt.Sprintf("failed to retrieve fileID from database: %v", err)) return } - log.Debugf("resuming work on file with fileId: %v", p.fileIDs[r.URL.Path]) + log.Debugf("resuming work on file with fileId: %v", p.fileIDs[fileIdentifier]) } - if err := p.storeObjectSizeInDB(rawFilepath, p.fileIDs[r.URL.Path]); err != nil { + if err := p.storeObjectSizeInDB(rawFilepath, p.fileIDs[fileIdentifier]); err != nil { log.Errorf("storeObjectSizeInDB failed because: %s", err.Error()) p.internalServerError(w, r, "storeObjectSizeInDB failed") return } - log.Debugf("marking file %v as 'uploaded' in database", p.fileIDs[r.URL.Path]) - err = p.database.UpdateFileEventLog(p.fileIDs[r.URL.Path], "uploaded", p.fileIDs[r.URL.Path], "inbox", "{}", string(jsonMessage)) + log.Debugf("marking file %v as 'uploaded' in database", p.fileIDs[fileIdentifier]) + err = 
p.database.UpdateFileEventLog(p.fileIDs[fileIdentifier], "uploaded", "inbox", "{}", string(jsonMessage)) if err != nil { p.internalServerError(w, r, fmt.Sprintf("could not connect to db: %v", err)) return } - delete(p.fileIDs, r.URL.Path) + delete(p.fileIDs, fileIdentifier) } // Writing non-200 to the response before the headers propagate the error @@ -276,7 +285,7 @@ func (p *Proxy) allowedResponse(w http.ResponseWriter, r *http.Request, token jw } // Renew the connection to MQ if necessary, then send message -func (p *Proxy) checkAndSendMessage(jsonMessage []byte, r *http.Request) error { +func (p *Proxy) checkAndSendMessage(fileID string, jsonMessage []byte, r *http.Request) error { var err error if p.messenger == nil { return errors.New("messenger is down") @@ -297,8 +306,8 @@ func (p *Proxy) checkAndSendMessage(jsonMessage []byte, r *http.Request) error { } } - log.Debugf("Sending message with id %s", p.fileIDs[r.URL.Path]) - if err := p.messenger.SendMessage(p.fileIDs[r.URL.Path], p.messenger.Conf.Exchange, p.messenger.Conf.RoutingKey, jsonMessage); err != nil { + log.Debugf("Sending message with id %s", fileID) + if err := p.messenger.SendMessage(fileID, p.messenger.Conf.Exchange, p.messenger.Conf.RoutingKey, jsonMessage); err != nil { return fmt.Errorf("error when sending message to broker: %v", err) } @@ -562,7 +571,7 @@ func (p *Proxy) checkFileExists(fullPath string) (bool, error) { return result != nil, err } -func (p *Proxy) sendMessageOnOverwrite(r *http.Request, rawFilepath string, token jwt.Token) error { +func (p *Proxy) sendMessageOnOverwrite(fileID string, r *http.Request, rawFilepath string, token jwt.Token) error { exist, err := p.checkFileExists(r.URL.Path) if err != nil { return err @@ -583,7 +592,7 @@ func (p *Proxy) sendMessageOnOverwrite(r *http.Request, rawFilepath string, toke return err } - err = p.checkAndSendMessage(jsonMessage, r) + err = p.checkAndSendMessage(fileID, jsonMessage, r) if err != nil { return err } diff --git 
a/sda/cmd/s3inbox/proxy_test.go b/sda/cmd/s3inbox/proxy_test.go index 13e901b58..03e12747f 100644 --- a/sda/cmd/s3inbox/proxy_test.go +++ b/sda/cmd/s3inbox/proxy_test.go @@ -140,7 +140,7 @@ func (s *ProxyTests) SetupTest() { ) _, _ = s3Client.CreateBucket(context.TODO(), &s3.CreateBucketInput{Bucket: aws.String(s.S3conf.Bucket)}) if err != nil { - fmt.Println(err.Error()) + _, _ = fmt.Println(err.Error()) } output, err := s3Client.PutObject(context.TODO(), &s3.PutObjectInput{ @@ -176,11 +176,11 @@ func startFakeServer(port string) *FakeServer { log.Warnf("hello fake will return %s", f.resp) if f.resp != "" { log.Warnf("fake writes %s", f.resp) - fmt.Fprint(w, f.resp) + _, _ = fmt.Fprint(w, f.resp) } }) ts := httptest.NewUnstartedServer(foo) - ts.Listener.Close() + _ = ts.Listener.Close() ts.Listener = l ts.Start() @@ -630,7 +630,7 @@ func (s *ProxyTests) TestStoreObjectSizeInDB() { p := NewProxy(s.S3conf, helper.NewAlwaysAllow(), s.messenger, s.database, new(tls.Config)) p.database = db - fileID, err := db.RegisterFile("/dummy/file", "test-user") + fileID, err := db.RegisterFile(nil, "/dummy/file", "test-user") assert.NoError(s.T(), err) assert.NotNil(s.T(), fileID) @@ -653,7 +653,7 @@ func (s *ProxyTests) TestStoreObjectSizeInDB_dbFailure() { p := NewProxy(s.S3conf, helper.NewAlwaysAllow(), s.messenger, s.database, new(tls.Config)) p.database = db - fileID, err := db.RegisterFile("/dummy/file", "test-user") + fileID, err := db.RegisterFile(nil, "/dummy/file", "test-user") assert.NoError(s.T(), err) assert.NotNil(s.T(), fileID) @@ -672,7 +672,7 @@ func (s *ProxyTests) TestStoreObjectSizeInDB_s3Failure() { p := NewProxy(s.S3conf, helper.NewAlwaysAllow(), s.messenger, s.database, new(tls.Config)) p.database = db - fileID, err := db.RegisterFile("/dummy/file", "test-user") + fileID, err := db.RegisterFile(nil, "/dummy/file", "test-user") assert.NoError(s.T(), err) assert.NotNil(s.T(), fileID) @@ -699,7 +699,7 @@ func (s *ProxyTests) TestStoreObjectSizeInDB_fastCheck() 
{ p := NewProxy(s.S3conf, helper.NewAlwaysAllow(), s.messenger, s.database, new(tls.Config)) p.database = db - fileID, err := db.RegisterFile("/test/new_file", "test-user") + fileID, err := db.RegisterFile(nil, "/test/new_file", "test-user") assert.NoError(s.T(), err) assert.NotNil(s.T(), fileID) diff --git a/sda/cmd/s3inbox/s3inbox_test.go b/sda/cmd/s3inbox/s3inbox_test.go index 5f2c7eae2..c0dcfae2b 100644 --- a/sda/cmd/s3inbox/s3inbox_test.go +++ b/sda/cmd/s3inbox/s3inbox_test.go @@ -78,7 +78,7 @@ func TestMain(m *testing.M) { if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() return nil }); err != nil { @@ -161,7 +161,7 @@ func TestMain(m *testing.M) { if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() return nil }); err != nil { @@ -218,7 +218,7 @@ func TestMain(m *testing.M) { if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() return nil }); err != nil { diff --git a/sda/cmd/sync/sync.go b/sda/cmd/sync/sync.go index 7d71051a7..67c174136 100644 --- a/sda/cmd/sync/sync.go +++ b/sda/cmd/sync/sync.go @@ -25,7 +25,7 @@ import ( var ( err error - key, publicKey *[32]byte + key *[32]byte db *database.SDAdb conf *config.Config archive, syncDestination storage.Backend @@ -60,11 +60,6 @@ func main() { log.Fatal(err) } - publicKey, err = config.GetC4GHPublicKey() - if err != nil { - log.Fatal(err) - } - defer mq.Channel.Close() defer mq.Connection.Close() defer db.Close() @@ -90,7 +85,7 @@ func main() { log.Fatal(err) } for delivered := range messages { - log.Debugf("Received a message (corr-id: %s, message: %s)", + log.Debugf("Received a message (correlation-id: %s, message: %s)", delivered.CorrelationId, delivered.Body) @@ -196,7 +191,7 @@ func syncFiles(stableID string) error { } pubkeyList := [][chacha20poly1305.KeySize]byte{} - pubkeyList = append(pubkeyList, *publicKey) + pubkeyList = append(pubkeyList, *conf.Sync.PublicKey) newHeader, err := headers.ReEncryptHeader(header, *key, pubkeyList) if err != nil 
{ return err diff --git a/sda/cmd/sync/sync_test.go b/sda/cmd/sync/sync_test.go index 95d35a981..c3c10d2f6 100644 --- a/sda/cmd/sync/sync_test.go +++ b/sda/cmd/sync/sync_test.go @@ -28,6 +28,7 @@ var dbPort int type SyncTest struct { suite.Suite + keyPath string } func TestSyncTestSuite(t *testing.T) { @@ -135,30 +136,30 @@ func (s *SyncTest) SetupTest() { viper.Set("sync.remote.password", "pass") key := "-----BEGIN CRYPT4GH ENCRYPTED PRIVATE KEY-----\nYzRnaC12MQAGc2NyeXB0ABQAAAAAEna8op+BzhTVrqtO5Rx7OgARY2hhY2hhMjBfcG9seTEzMDUAPMx2Gbtxdva0M2B0tb205DJT9RzZmvy/9ZQGDx9zjlObj11JCqg57z60F0KhJW+j/fzWL57leTEcIffRTA==\n-----END CRYPT4GH ENCRYPTED PRIVATE KEY-----" - keyPath, _ := os.MkdirTemp("", "key") - err := os.WriteFile(keyPath+"/c4gh.key", []byte(key), 0600) + s.keyPath, _ = os.MkdirTemp("", "key") + err := os.WriteFile(s.keyPath+"/c4gh.key", []byte(key), 0600) assert.NoError(s.T(), err) - viper.Set("c4gh.filepath", keyPath+"/c4gh.key") + viper.Set("c4gh.filepath", s.keyPath+"/c4gh.key") viper.Set("c4gh.passphrase", "test") pubKey := "-----BEGIN CRYPT4GH PUBLIC KEY-----\nuQO46R56f/Jx0YJjBAkZa2J6n72r6HW/JPMS4tfepBs=\n-----END CRYPT4GH PUBLIC KEY-----" - err = os.WriteFile(keyPath+"/c4gh.pub", []byte(pubKey), 0600) + err = os.WriteFile(s.keyPath+"/c4gh.pub", []byte(pubKey), 0600) assert.NoError(s.T(), err) - viper.Set("c4gh.syncPubKeyPath", keyPath+"/c4gh.pub") - - defer os.RemoveAll(keyPath) + viper.Set("c4gh.syncPubKeyPath", s.keyPath+"/c4gh.pub") } func (s *SyncTest) TestBuildSyncDatasetJSON() { s.SetupTest() + defer os.RemoveAll(s.keyPath) + conf, err := config.NewConfig("sync") assert.NoError(s.T(), err) db, err = database.NewSDAdb(conf.Database) assert.NoError(s.T(), err) - fileID, err := db.RegisterFile("dummy.user/test/file1.c4gh", "dummy.user") + fileID, err := db.RegisterFile(nil, "dummy.user/test/file1.c4gh", "dummy.user") assert.NoError(s.T(), err, "failed to register file in database") err = db.SetAccessionID("ed6af454-d910-49e3-8cda-488a6f246e67", fileID) 
assert.NoError(s.T(), err) diff --git a/sda/cmd/syncapi/syncapi_test.go b/sda/cmd/syncapi/syncapi_test.go index da21dbab4..9eeeed92b 100644 --- a/sda/cmd/syncapi/syncapi_test.go +++ b/sda/cmd/syncapi/syncapi_test.go @@ -80,7 +80,7 @@ func TestMain(m *testing.M) { if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() return nil }); err != nil { diff --git a/sda/cmd/verify/verify.go b/sda/cmd/verify/verify.go index 572ac6c5c..fc0a008bd 100644 --- a/sda/cmd/verify/verify.go +++ b/sda/cmd/verify/verify.go @@ -71,7 +71,7 @@ func main() { err) } for delivered := range messages { - log.Debugf("received a message (corr-id: %s, message: %s)", delivered.CorrelationId, delivered.Body) + log.Debugf("received a message (correlation-id: %s, message: %s)", delivered.CorrelationId, delivered.Body) err := schema.ValidateJSON(fmt.Sprintf("%s/ingestion-verification.json", conf.Broker.SchemasPath), delivered.Body) if err != nil { log.Errorf("validation of incoming message (ingestion-verification) failed, correlation-id: %s, reason: (%s)", delivered.CorrelationId, err.Error()) @@ -97,14 +97,14 @@ func main() { _ = json.Unmarshal(delivered.Body, &message) log.Infof( - "Received work (corr-id: %s, filepath: %s, user: %s)", - delivered.CorrelationId, message.FilePath, message.User, + "Received work (message.correlation-id: %s, file-id: %s, filepath: %s, user: %s)", + delivered.CorrelationId, message.FileID, message.FilePath, message.User, ) // If the file has been canceled by the uploader, don't spend time working on it. - status, err := db.GetFileStatus(delivered.CorrelationId) + status, err := db.GetFileStatus(message.FileID) if err != nil { - log.Errorf("failed to get file status, correlation-id: %s, reason: (%s)", delivered.CorrelationId, err.Error()) + log.Errorf("failed to get file status, file-id: %s, reason: (%s)", message.FileID, err.Error()) // Send the message to an error queue so it can be analyzed. 
infoErrorMessage := broker.InfoError{ Error: "Getheader failed", @@ -113,7 +113,7 @@ func main() { } body, _ := json.Marshal(infoErrorMessage) - if err := mq.SendMessage(delivered.CorrelationId, conf.Broker.Exchange, "error", body); err != nil { + if err := mq.SendMessage(message.FileID, conf.Broker.Exchange, "error", body); err != nil { log.Errorf("failed to publish message, reason: (%s)", err.Error()) } @@ -124,7 +124,7 @@ func main() { continue } if status == "disabled" { - log.Infof("file with correlation-id: %s is disabled, stopping verification", delivered.CorrelationId) + log.Infof("file with file-id: %s is disabled, stopping verification", message.FileID) if err := delivered.Ack(false); err != nil { log.Errorf("Failed acking canceled work, reason: (%s)", err.Error()) } @@ -148,7 +148,7 @@ func main() { body, _ := json.Marshal(infoErrorMessage) // Send the message to an error queue so it can be analyzed. - if err := mq.SendMessage(delivered.CorrelationId, conf.Broker.Exchange, "error", body); err != nil { + if err := mq.SendMessage(message.FileID, conf.Broker.Exchange, "error", body); err != nil { log.Errorf("failed to publish message, reason: (%s)", err.Error()) } @@ -161,7 +161,7 @@ func main() { log.Errorf("Failed to get archived file size, file-id: %s, archive-path: %s, reason: (%s)", message.FileID, message.ArchivePath, err.Error()) if strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "NoSuchKey:") || strings.Contains(err.Error(), "NotFound:") { jsonMsg, _ := json.Marshal(map[string]string{"error": err.Error()}) - if err := db.UpdateFileEventLog(message.FileID, "error", delivered.CorrelationId, "verify", string(jsonMsg), string(delivered.Body)); err != nil { + if err := db.UpdateFileEventLog(message.FileID, "error", "verify", string(jsonMsg), string(delivered.Body)); err != nil { log.Errorf("failed to set ingestion status for file from message, file-id: %v", message.FileID) } } @@ -177,7 +177,7 @@ func main() { 
OriginalMessage: message, } body, _ := json.Marshal(fileError) - if err := mq.SendMessage(delivered.CorrelationId, conf.Broker.Exchange, "error", body); err != nil { + if err := mq.SendMessage(message.FileID, conf.Broker.Exchange, "error", body); err != nil { log.Errorf("failed to publish message, reason: (%s)", err.Error()) } @@ -196,7 +196,7 @@ func main() { } body, _ := json.Marshal(infoErrorMessage) - if err := mq.SendMessage(delivered.CorrelationId, conf.Broker.Exchange, "error", body); err != nil { + if err := mq.SendMessage(message.FileID, conf.Broker.Exchange, "error", body); err != nil { log.Errorf("failed to publish message, reason: (%s)", err.Error()) } @@ -243,7 +243,7 @@ func main() { } body, _ := json.Marshal(infoErrorMessage) - if err := mq.SendMessage(delivered.CorrelationId, conf.Broker.Exchange, "error", body); err != nil { + if err := mq.SendMessage(message.FileID, conf.Broker.Exchange, "error", body); err != nil { log.Errorf("Failed to publish error message, reason: (%s)", err.Error()) } @@ -273,7 +273,7 @@ func main() { if file.DecryptedChecksum != decrypted { log.Errorf("encrypted checksum don't match for file, file-id: %s", message.FileID) - if err := db.UpdateFileEventLog(message.FileID, "error", delivered.CorrelationId, "verify", `{"error":"decrypted checksum don't match"}`, string(delivered.Body)); err != nil { + if err := db.UpdateFileEventLog(message.FileID, "error", "verify", `{"error":"decrypted checksum don't match"}`, string(delivered.Body)); err != nil { log.Errorf("set status ready failed, file-id: %s, reason: (%v)", message.FileID, err) if err := delivered.Nack(false, true); err != nil { log.Errorf("failed to Nack message, reason: (%v)", err) @@ -290,7 +290,7 @@ func main() { if file.ArchiveChecksum != message.EncryptedChecksums[0].Value { log.Errorf("encrypted checksum mismatch for file, file-id: %s, filepath: %s, expected: %s, got: %s", message.FileID, message.FilePath, message.EncryptedChecksums[0].Value, file.ArchiveChecksum) 
- if err := db.UpdateFileEventLog(message.FileID, "error", delivered.CorrelationId, "verify", `{"error":"encrypted checksum don't match"}`, string(delivered.Body)); err != nil { + if err := db.UpdateFileEventLog(message.FileID, "error", "verify", `{"error":"encrypted checksum don't match"}`, string(delivered.Body)); err != nil { log.Errorf("set status ready failed, file-id: %s, reason: (%v)", message.FileID, err) if err := delivered.Nack(false, true); err != nil { log.Errorf("failed to Nack message, reason: (%v)", err) @@ -323,9 +323,9 @@ func main() { // Logging is in ValidateJSON so just restart on new message continue } - status, err := db.GetFileStatus(delivered.CorrelationId) + status, err := db.GetFileStatus(message.FileID) if err != nil { - log.Errorf("failed to get file status, correlation-id: %s, reason: (%s)", delivered.CorrelationId, err.Error()) + log.Errorf("failed to get file status, file-id: %s, reason: (%s)", message.FileID, err.Error()) // Send the message to an error queue so it can be analyzed. 
infoErrorMessage := broker.InfoError{ Error: "Getheader failed", @@ -334,7 +334,7 @@ func main() { } body, _ := json.Marshal(infoErrorMessage) - if err := mq.SendMessage(delivered.CorrelationId, conf.Broker.Exchange, "error", body); err != nil { + if err := mq.SendMessage(message.FileID, conf.Broker.Exchange, "error", body); err != nil { log.Errorf("failed to publish message, reason: (%s)", err.Error()) } @@ -346,7 +346,7 @@ func main() { } if status == "disabled" { - log.Infof("file with correlation-id: %s is disabled, stopping verification", delivered.CorrelationId) + log.Infof("file with file-id: %s is disabled, stopping verification", message.FileID) if err := delivered.Ack(false); err != nil { log.Errorf("Failed acking canceled work, reason: (%s)", err.Error()) } @@ -377,7 +377,7 @@ func main() { log.Infof("file is already verified, file-id: %s", message.FileID) } - if err := db.UpdateFileEventLog(message.FileID, "verified", delivered.CorrelationId, "ingest", "{}", string(verifiedMessage)); err != nil { + if err := db.UpdateFileEventLog(message.FileID, "verified", "ingest", "{}", string(verifiedMessage)); err != nil { log.Errorf("failed to set event log status for file, file-id: %s", message.FileID) if err := delivered.Nack(false, true); err != nil { log.Errorf("failed to Nack message, reason: (%s)", err.Error()) @@ -387,7 +387,7 @@ func main() { } // Send message to verified queue - if err := mq.SendMessage(delivered.CorrelationId, conf.Broker.Exchange, conf.Broker.RoutingKey, verifiedMessage); err != nil { + if err := mq.SendMessage(message.FileID, conf.Broker.Exchange, conf.Broker.RoutingKey, verifiedMessage); err != nil { // TODO fix resend mechanism log.Errorf("failed to publish message, reason: (%s)", err.Error()) diff --git a/sda/config_local.yaml b/sda/config_local.yaml index d2eb9f088..d4f70f3c7 100644 --- a/sda/config_local.yaml +++ b/sda/config_local.yaml @@ -80,8 +80,8 @@ c4gh: privateKeys: - filePath: "/tmp/shared/c4gh.sec.pem" passphrase: 
"c4ghpass" - - filePath: "/tmp/shared/c4gh1.sec.pem" - passphrase: "c4ghpass" + - filePath: "/tmp/shared/rotatekey.sec.pem" + passphrase: "rotatekeyPass" oidc: configuration: diff --git a/sda/go.mod b/sda/go.mod index 27e056de2..6ef93e729 100644 --- a/sda/go.mod +++ b/sda/go.mod @@ -3,15 +3,15 @@ module github.com/neicnordic/sensitive-data-archive go 1.24.1 require ( - github.com/aws/aws-sdk-go-v2 v1.39.0 - github.com/aws/aws-sdk-go-v2/config v1.31.7 - github.com/aws/aws-sdk-go-v2/credentials v1.18.11 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.5 - github.com/aws/aws-sdk-go-v2/service/s3 v1.88.0 + github.com/aws/aws-sdk-go-v2 v1.32.8 + github.com/aws/aws-sdk-go-v2/config v1.28.11 + github.com/aws/aws-sdk-go-v2/credentials v1.17.52 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48 + github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2 github.com/aws/smithy-go v1.23.0 - github.com/casbin/casbin/v2 v2.122.0 + github.com/casbin/casbin/v2 v2.127.0 github.com/coreos/go-oidc/v3 v3.15.0 - github.com/gin-gonic/gin v1.10.1 + github.com/gin-gonic/gin v1.11.0 github.com/google/uuid v1.6.0 github.com/gorilla/mux v1.8.1 github.com/iris-contrib/middleware/cors v0.0.0-20250207234507-372f6828ef8c @@ -31,10 +31,10 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/spf13/viper v1.21.0 github.com/stretchr/testify v1.11.1 - golang.org/x/crypto v0.42.0 + golang.org/x/crypto v0.45.0 golang.org/x/oauth2 v0.31.0 - google.golang.org/grpc v1.75.0 - google.golang.org/protobuf v1.36.8 + google.golang.org/grpc v1.75.1 + google.golang.org/protobuf v1.36.9 ) require ( @@ -50,26 +50,26 @@ require ( github.com/Shopify/goreferrer v0.0.0-20250513162709-b78e2829e40b // indirect github.com/andybalholm/brotli v1.1.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 // indirect - 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.29.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.38.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.7 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/bmatcuk/doublestar/v4 v4.8.1 // indirect - github.com/bytedance/sonic v1.13.2 // indirect - github.com/bytedance/sonic/loader v0.2.4 // indirect + github.com/bytedance/sonic v1.14.0 // indirect + github.com/bytedance/sonic/loader v0.3.0 // indirect github.com/casbin/govaluate v1.6.0 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cloudwego/base64x v0.1.5 // indirect + github.com/cloudwego/base64x v0.1.6 // indirect 
github.com/containerd/continuity v0.4.5 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a // indirect @@ -85,11 +85,13 @@ require ( github.com/gin-contrib/sse v1.1.0 // indirect github.com/go-jose/go-jose/v3 v3.0.4 // indirect github.com/go-jose/go-jose/v4 v4.1.1 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect - github.com/go-playground/validator/v10 v10.26.0 // indirect + github.com/go-playground/validator/v10 v10.27.0 // indirect github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/goccy/go-json v0.10.5 // indirect + github.com/goccy/go-yaml v1.18.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.2 // indirect github.com/golang/snappy v1.0.0 // indirect @@ -106,7 +108,7 @@ require ( github.com/kataras/sitemap v0.0.6 // indirect github.com/kataras/tunnel v0.0.4 // indirect github.com/klauspost/compress v1.18.0 // indirect - github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/klauspost/cpuid/v2 v2.3.0 // indirect github.com/kr/fs v0.1.0 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/lestrrat-go/blackmagic v1.0.3 // indirect @@ -114,6 +116,7 @@ require ( github.com/lestrrat-go/httprc v1.0.6 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect github.com/lestrrat-go/option v1.0.1 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailgun/raymond/v2 v2.0.48 // indirect github.com/mailru/easyjson v0.9.0 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -126,14 +129,18 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/opencontainers/runc v1.2.3 // indirect 
+ github.com/opencontainers/runc v1.2.8 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/quic-go/qpack v0.5.1 // indirect + github.com/quic-go/quic-go v0.54.1 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/schollz/closestmatch v2.1.0+incompatible // indirect github.com/segmentio/asm v1.2.0 // indirect + github.com/shirou/gopsutil/v3 v3.24.5 // indirect github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/spf13/afero v1.15.0 // indirect github.com/spf13/cast v1.10.0 // indirect @@ -141,8 +148,10 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/tdewolff/minify/v2 v2.23.6 // indirect github.com/tdewolff/parse/v2 v2.8.1 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect - github.com/ugorji/go/codec v1.2.12 // indirect + github.com/ugorji/go/codec v1.3.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect @@ -150,13 +159,18 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/yosssi/ace v0.0.5 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.uber.org/mock v0.5.0 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/arch v0.17.0 // indirect + golang.org/x/arch v0.20.0 // indirect golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 // indirect - golang.org/x/net v0.43.0 // 
indirect - golang.org/x/sys v0.36.0 // indirect - golang.org/x/text v0.29.0 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.11.0 // indirect + golang.org/x/tools v0.38.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/sda/go.sum b/sda/go.sum index 6e98f3a96..25da9c612 100644 --- a/sda/go.sum +++ b/sda/go.sum @@ -24,42 +24,42 @@ github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= -github.com/aws/aws-sdk-go-v2 v1.39.0 h1:xm5WV/2L4emMRmMjHFykqiA4M/ra0DJVSWUkDyBjbg4= -github.com/aws/aws-sdk-go-v2 v1.39.0/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2 v1.32.8 h1:cZV+NUS/eGxKXMtmyhtYPJ7Z4YLoI/V8bkTdRZfYhGo= +github.com/aws/aws-sdk-go-v2 v1.32.8/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00= -github.com/aws/aws-sdk-go-v2/config v1.31.7 h1:zS1O6hr6t0nZdBCMFc/c9OyZFyLhXhf/B2IZ9Y0lRQE= -github.com/aws/aws-sdk-go-v2/config v1.31.7/go.mod h1:GpHmi1PQDdL5pP4JaB00pU0ek4EXVcYH7IkjkUadQmM= -github.com/aws/aws-sdk-go-v2/credentials v1.18.11 h1:1Fnb+7Dk96/VYx/uYfzk5sU2V0b0y2RWZROiMZCN/Io= -github.com/aws/aws-sdk-go-v2/credentials v1.18.11/go.mod h1:iuvn9v10dkxU4sDgtTXGWY0MrtkEcmkUmjv4clxhuTc= 
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7 h1:Is2tPmieqGS2edBnmOJIbdvOA6Op+rRpaYR60iBAwXM= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.7/go.mod h1:F1i5V5421EGci570yABvpIXgRIBPb5JM+lSkHF6Dq5w= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.5 h1:fSuJX/VBJKufwJG/szWgUdRJVyRiEQDDXNh/6NPrTBg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.5/go.mod h1:LvN0noQuST+3Su55Wl++BkITpptnfN9g6Ohkv4zs9To= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7 h1:UCxq0X9O3xrlENdKf1r9eRJoKz/b0AfGkpp3a7FPlhg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.7/go.mod h1:rHRoJUNUASj5Z/0eqI4w32vKvC7atoWR0jC+IkmVH8k= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7 h1:Y6DTZUn7ZUC4th9FMBbo8LVE+1fyq3ofw+tRwkUd3PY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.7/go.mod h1:x3XE6vMnU9QvHN/Wrx2s44kwzV2o2g5x/siw4ZUJ9g8= +github.com/aws/aws-sdk-go-v2/config v1.28.11 h1:7Ekru0IkRHRnSRWGQLnLN6i0o1Jncd0rHo2T130+tEQ= +github.com/aws/aws-sdk-go-v2/config v1.28.11/go.mod h1:x78TpPvBfHH16hi5tE3OCWQ0pzNfyXA349p5/Wp82Yo= +github.com/aws/aws-sdk-go-v2/credentials v1.17.52 h1:I4ymSk35LHogx2Re2Wu6LOHNTRaRWkLVoJgWS5Wd40M= +github.com/aws/aws-sdk-go-v2/credentials v1.17.52/go.mod h1:vAkqKbMNUcher8fDXP2Ge2qFXKMkcD74qvk1lJRMemM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 h1:IBAoD/1d8A8/1aA8g4MBVtTRHhXRiNAgwdbo/xRM2DI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23/go.mod h1:vfENuCM7dofkgKpYzuzf1VT1UKkA/YL3qanfBn7HCaA= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48 h1:XnXVe2zRyPf0+fAW5L05esmngvBpC6DQZK7oZB/z/Co= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48/go.mod h1:S3wey90OrS4f7kYxH6PT175YyEcHTORY07++HurMaRM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 h1:jSJjSBzw8VDIbWv+mmvBSP8ezsztMYJGH+eKqi9AmNs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27/go.mod h1:/DAhLbFRgwhmvJdOfSm+WwikZrCuUJiA4WgJG0fTNSw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 
h1:l+X4K77Dui85pIj5foXDhPlnqcNRG2QUyvca300lXh8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27/go.mod h1:KvZXSFEXm6x84yE8qffKvT3x8J5clWnVFXphpohhzJ8= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7 h1:BszAktdUo2xlzmYHjWMq70DqJ7cROM8iBd3f6hrpuMQ= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.7/go.mod h1:XJ1yHki/P7ZPuG4fd3f0Pg/dSGA2cTQBCLw82MH2H48= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 h1:AmB5QxnD+fBFrg9LcqzkgF/CaYvMyU/BTlejG4t1S7Q= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27/go.mod h1:Sai7P3xTiyv9ZUYO3IFxMnmiIP759/67iQbU4kdmkyU= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7 h1:zmZ8qvtE9chfhBPuKB2aQFxW5F/rpwXUgmcVCgQzqRw= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.7/go.mod h1:vVYfbpd2l+pKqlSIDIOgouxNsGu5il9uDp0ooWb0jys= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7 h1:mLgc5QIgOy26qyh5bvW+nDoAppxgn3J2WV3m9ewq7+8= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.7/go.mod h1:wXb/eQnqt8mDQIQTTmcw58B5mYGxzLGZGK8PWNFZ0BA= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7 h1:u3VbDKUCWarWiU+aIUK4gjTr/wQFXV17y3hgNno9fcA= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.7/go.mod h1:/OuMQwhSyRapYxq6ZNpPer8juGNrB4P5Oz8bZ2cgjQE= -github.com/aws/aws-sdk-go-v2/service/s3 v1.88.0 h1:k5JXPr+2SrPDwM3PdygZUenn0lVPLa3KOs7cCYqinFs= -github.com/aws/aws-sdk-go-v2/service/s3 v1.88.0/go.mod h1:xajPTguLoeQMAOE44AAP2RQoUhF8ey1g5IFHARv71po= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.2 
h1:rcoTaYOhGE/zfxE1uR6X5fvj+uKkqeCNRE0rBbiQM34= -github.com/aws/aws-sdk-go-v2/service/sso v1.29.2/go.mod h1:Ql6jE9kyyWI5JHn+61UT/Y5Z0oyVJGmgmJbZD5g4unY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.3 h1:BSIfeFtU9tlSt8vEYS7KzurMoAuYzYPWhcZiMtxVf2M= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.3/go.mod h1:XclEty74bsGBCr1s0VSaA11hQ4ZidK4viWK7rRfO88I= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.3 h1:yEiZ0ztgji2GsCb/6uQSITXcGdtmWMfLRys0jJFiUkc= -github.com/aws/aws-sdk-go-v2/service/sts v1.38.3/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 h1:iwYS40JnrBeA9e9aI5S6KKN4EB2zR4iUVYN0nwVivz4= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8/go.mod h1:Fm9Mi+ApqmFiknZtGpohVcBGvpTu542VC4XO9YudRi0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 h1:cWno7lefSH6Pp+mSznagKCgfDGeZRin66UvYUqAkyeA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8/go.mod h1:tPD+VjU3ABTBoEJ3nctu5Nyg4P4yjqSH5bJGGkY4+XE= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 h1:/Mn7gTedG86nbpjT4QEKsN1D/fThiYe1qvq7WsBGNHg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8/go.mod h1:Ae3va9LPmvjj231ukHB6UeT8nS7wTPfC3tMZSZMwNYg= +github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2 h1:a7aQ3RW+ug4IbhoQp29NZdc7vqrzKZZfWZSaQAXOZvQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2/go.mod h1:xMekrnhmJ5aqmyxtmALs7mlvXw5xRh+eYjOjvrIIFJ4= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 h1:YqtxripbjWb2QLyzRK9pByfEDvgg95gpC2AyDq4hFE8= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.9/go.mod h1:lV8iQpg6OLOfBnqbGMBKYjilBlf633qwHnBEiMSPoHY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 h1:6dBT1Lz8fK11m22R+AqfRsFn8320K0T5DTGxxOQBSMw= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8/go.mod h1:/kiBvRQXBc6xeJTYzhSdGvJ5vm1tjaDEjH+MSeRJnlY= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.7 h1:qwGa9MA8G7mBq2YphHFaygdPe5t9OA7SvaJdwWTlEds= 
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.7/go.mod h1:+8h7PZb3yY5ftmVLD7ocEoE98hdc8PoKS0H3wfx1dlc= github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= @@ -67,13 +67,12 @@ github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd3 github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38= github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= -github.com/bytedance/sonic v1.13.2 h1:8/H1FempDZqC4VqjptGo14QQlJx8VdZJegxs6wwfqpQ= -github.com/bytedance/sonic v1.13.2/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4= -github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU= -github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY= -github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= -github.com/casbin/casbin/v2 v2.122.0 h1:T960HruD6W3JZmoYQjKAu0YBLUL++5l7LormNJIcmkc= -github.com/casbin/casbin/v2 v2.122.0/go.mod h1:Ee33aqGrmES+GNL17L0h9X28wXuo829wnNUnS0edAco= +github.com/bytedance/sonic v1.14.0 h1:/OfKt8HFw0kh2rj8N0F6C/qPGRESq0BbaNZgcNXXzQQ= +github.com/bytedance/sonic v1.14.0/go.mod h1:WoEbx8WTcFJfzCe0hbmyTGrfjt8PzNEBdxlNUO24NhA= +github.com/bytedance/sonic/loader v0.3.0 h1:dskwH8edlzNMctoruo8FPTJDF3vLtDT0sXZwvZJyqeA= +github.com/bytedance/sonic/loader v0.3.0/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI= +github.com/casbin/casbin/v2 v2.127.0 h1:UGK3uO/8cOslnNqFUJ4xzm/bh+N+o45U7cSolaFk38c= +github.com/casbin/casbin/v2 v2.127.0/go.mod h1:n4uZK8+tCMvcD6EVQZI90zKAok8iHAvEypcMJVKhGF0= github.com/casbin/govaluate v1.3.0/go.mod 
h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A= github.com/casbin/govaluate v1.6.0 h1:HXWFa4di61X4iZrWLJdpot/v1UkKUG+pPmcu7wWRxrI= github.com/casbin/govaluate v1.6.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A= @@ -81,9 +80,8 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4= -github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w= -github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY= +github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M= +github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU= github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= github.com/coreos/go-oidc/v3 v3.15.0 h1:R6Oz8Z4bqWR7VFQ+sPSvZPQv4x8M+sJkDO5ojgwlyAg= @@ -121,8 +119,8 @@ github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBv github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w= github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM= -github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ= -github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= +github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk= +github.com/gin-gonic/gin v1.11.0/go.mod 
h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls= github.com/go-jose/go-jose/v3 v3.0.4 h1:Wp5HA7bLQcKnf6YYao/4kpRpVMp/yf6+pJKV8WFSaNY= github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= @@ -131,14 +129,16 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k= -github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= +github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4= +github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= 
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= @@ -147,6 +147,8 @@ github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= @@ -159,6 +161,7 @@ github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomarkdown/markdown v0.0.0-20250311123330-531bef5e742b h1:EY/KpStFl60qA17CptGXhwfZ+k1sFNJIUNR8DdbcuUk= github.com/gomarkdown/markdown v0.0.0-20250311123330-531bef5e742b/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -210,10 +213,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= 
-github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= -github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= -github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M= +github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y= +github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= @@ -239,6 +240,8 @@ github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNB github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/mailgun/raymond/v2 v2.0.48 h1:5dmlB680ZkFG2RN/0lvTAghrSxIESeu9/2aeDqACtjw= github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= @@ -281,8 +284,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.0/go.mod 
h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opencontainers/runc v1.2.3 h1:fxE7amCzfZflJO2lHXf4y/y8M1BoAqp+FVmG19oYB80= -github.com/opencontainers/runc v1.2.3/go.mod h1:nSxcWUydXrsBZVYNSkTjoQ/N6rcyTtn+1SD5D4+kRIM= +github.com/opencontainers/runc v1.2.8 h1:RnEICeDReapbZ5lZEgHvj7E9Q3Eex9toYmaGBsbvU5Q= +github.com/opencontainers/runc v1.2.8/go.mod h1:cC0YkmZcuvr+rtBZ6T7NBoVbMGNAdLa/21vIElJDOzI= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/ory/dockertest/v3 v3.12.0 h1:3oV9d0sDzlSQfHtIaB5k6ghUCVMVLpAY8hwrqoCyRCw= @@ -296,10 +299,16 @@ github.com/pkg/sftp v1.13.9/go.mod h1:OBN7bVXdstkFFN/gdnHPUb5TE8eb8G1Rp9wCItqjkk github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI= +github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg= +github.com/quic-go/quic-go v0.54.1 h1:4ZAWm0AhCb6+hE+l5Q1NAL0iRn/ZrMwqHRGQiFwj2eg= +github.com/quic-go/quic-go v0.54.1/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY= github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= @@ -314,6 +323,10 @@ github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -342,6 +355,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= 
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= @@ -352,10 +367,14 @@ github.com/tdewolff/parse/v2 v2.8.1 h1:J5GSHru6o3jF1uLlEKVXkDxxcVx6yzOlIVIotK4w2 github.com/tdewolff/parse/v2 v2.8.1/go.mod h1:Hwlni2tiVNKyzR1o6nUs4FOF07URA+JLBLd6dlIXYqo= github.com/tdewolff/test v1.0.11 h1:FdLbwQVHxqG16SlkGveC0JVyrJN62COWTRyUFzfbtBE= github.com/tdewolff/test v1.0.11/go.mod h1:XPuWBzvdUzhCuxWO1ojpXsyzsA5bFoS3tO/Q3kFuTG8= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE= -github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA= +github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= @@ -383,6 +402,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de 
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= @@ -397,10 +418,12 @@ go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mx go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= +go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= -golang.org/x/arch v0.17.0 h1:4O3dfLzd+lQewptAHqjewQZQDyEdejz3VwgeYwkZneU= -golang.org/x/arch v0.17.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= +golang.org/x/arch v0.20.0 h1:dx1zTU0MAE98U+TQ8BLl7XsJbgze2WnNKF/8tGp/Q6c= +golang.org/x/arch v0.20.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ 
-410,8 +433,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= -golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI= golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -422,6 +445,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -437,8 +462,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod 
h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.31.0 h1:8Fq0yVZLh4j4YA47vHKFTa9Ew5XIrCP8LC6UeNZnLxo= golang.org/x/oauth2 v0.31.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -451,12 +476,16 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -467,12 +496,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -482,8 +512,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term 
v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= -golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -494,8 +524,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -509,6 +539,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= 
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -517,10 +549,10 @@ gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= -google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= -google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.9 h1:w2gp2mA27hUeUzj9Ex9FBjsBm40zfaDtEWow293U7Iw= +google.golang.org/protobuf v1.36.9/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -539,4 +571,3 @@ gotest.tools/v3 v3.5.1 
h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE= -nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50= diff --git a/sda/internal/broker/broker.go b/sda/internal/broker/broker.go index 48a9c094e..c3807a7f1 100644 --- a/sda/internal/broker/broker.go +++ b/sda/internal/broker/broker.go @@ -141,7 +141,7 @@ func NewMQ(config MQConf) (*AMQPBroker, error) { } if e := channel.Confirm(false); e != nil { - fmt.Printf("channel could not be put into confirm mode: %s", e) + _, _ = fmt.Printf("channel could not be put into confirm mode: %s", e) return nil, fmt.Errorf("channel could not be put into confirm mode: %s", e) } diff --git a/sda/internal/broker/broker_test.go b/sda/internal/broker/broker_test.go index 99c61420d..843e74b9d 100644 --- a/sda/internal/broker/broker_test.go +++ b/sda/internal/broker/broker_test.go @@ -86,7 +86,7 @@ func TestMain(m *testing.M) { if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() return nil }); err != nil { @@ -103,7 +103,7 @@ func TestMain(m *testing.M) { log.Panicf("Could not purge resource: %s", err) } - os.RemoveAll(certPath) + _ = os.RemoveAll(certPath) os.Exit(code) } diff --git a/sda/internal/config/config.go b/sda/internal/config/config.go index 941c2b265..58a08e2e9 100644 --- a/sda/internal/config/config.go +++ b/sda/internal/config/config.go @@ -50,6 +50,7 @@ type Config struct { SyncAPI SyncAPIConf ReEncrypt ReEncConfig Auth AuthConf + RotateKey RotateKeyConf } type Grpc struct { @@ -70,6 +71,11 @@ type ReEncConfig struct { Timeout int } +type RotateKeyConf struct { + Grpc Grpc + PublicKey *[32]byte +} + type Sync struct { CenterPrefix string Destination storage.Conf @@ -77,6 +83,7 @@ type Sync struct { RemotePassword string RemotePort int 
RemoteUser string + PublicKey *[32]byte } type SyncAPIConf struct { @@ -389,6 +396,21 @@ func NewConfig(app string) (*Config, error) { requiredConfVars = []string{ "c4gh.privateKeys", } + case "rotatekey": + requiredConfVars = []string{ + "broker.host", + "broker.port", + "broker.user", + "broker.password", + "broker.queue", + "c4gh.rotatePubKeyPath", + "db.host", + "db.port", + "db.user", + "db.password", + "db.database", + "grpc.host", + } case "s3inbox": requiredConfVars = []string{ "broker.host", @@ -650,6 +672,27 @@ func NewConfig(app string) (*Config, error) { if err != nil { return nil, err } + case "rotatekey": + if err := c.configBroker(); err != nil { + return nil, err + } + + err := c.configDatabase() + if err != nil { + return nil, err + } + + c.configSchemas() + + c.RotateKey.Grpc, err = configReEncryptClient() + if err != nil { + return nil, err + } + + c.RotateKey.PublicKey, err = GetC4GHPublicKey(viper.GetString("c4gh.rotatePubKeyPath")) + if err != nil { + return nil, err + } case "s3inbox": err := c.configBroker() if err != nil { @@ -672,7 +715,8 @@ func NewConfig(app string) (*Config, error) { return nil, err } - if err := c.configDatabase(); err != nil { + err := c.configDatabase() + if err != nil { return nil, err } @@ -713,6 +757,7 @@ func NewConfig(app string) (*Config, error) { return c, nil } +// configAPI provides configuration for the api web server func (c *Config) configAPI() error { c.apiDefaults() api := APIConf{} @@ -1127,10 +1172,16 @@ func (c *Config) configSync() error { c.Sync.RemoteUser = viper.GetString("sync.remote.user") c.Sync.CenterPrefix = viper.GetString("sync.centerPrefix") + var err error + c.Sync.PublicKey, err = GetC4GHPublicKey(viper.GetString("c4gh.syncPubKeyPath")) + if err != nil { + return err + } + return nil } -// configSync provides configuration for the outgoing sync settings +// configSyncAPI provides configuration for the outgoing sync settings func (c *Config) configSyncAPI() { c.SyncAPI = SyncAPIConf{} 
c.SyncAPI.APIPassword = viper.GetString("sync.api.password") @@ -1163,7 +1214,7 @@ func GetC4GHKey() (*[32]byte, error) { return nil, err } - keyFile.Close() + _ = keyFile.Close() return &key, nil } @@ -1185,7 +1236,7 @@ func GetC4GHprivateKeys() ([]*[32]byte, error) { } key, err := keys.ReadPrivateKey(keyFile, []byte(entry.Passphrase)) - keyFile.Close() + _ = keyFile.Close() if err != nil { return nil, fmt.Errorf("failed to read private key from %s: %v", entry.FilePath, err) } @@ -1197,8 +1248,7 @@ func GetC4GHprivateKeys() ([]*[32]byte, error) { } // GetC4GHPublicKey reads the c4gh public key -func GetC4GHPublicKey() (*[32]byte, error) { - keyPath := viper.GetString("c4gh.syncPubKeyPath") +func GetC4GHPublicKey(keyPath string) (*[32]byte, error) { // Make sure the key path and passphrase is valid keyFile, err := os.Open(keyPath) if err != nil { @@ -1210,7 +1260,7 @@ func GetC4GHPublicKey() (*[32]byte, error) { return nil, err } - keyFile.Close() + _ = keyFile.Close() return &key, nil } diff --git a/sda/internal/config/config_test.go b/sda/internal/config/config_test.go index 4cde11881..04f200389 100644 --- a/sda/internal/config/config_test.go +++ b/sda/internal/config/config_test.go @@ -20,6 +20,7 @@ import ( type ConfigTestSuite struct { suite.Suite + pubKeyPath string } var certPath, rootDir string @@ -67,6 +68,11 @@ func (ts *ConfigTestSuite) SetupTest() { viper.Set("inbox.type", "s3") viper.Set("server.jwtpubkeypath", "testpath") viper.Set("log.level", "debug") + + pubKey := "-----BEGIN CRYPT4GH PUBLIC KEY-----\nuQO46R56f/Jx0YJjBAkZa2J6n72r6HW/JPMS4tfepBs=\n-----END CRYPT4GH PUBLIC KEY-----" + ts.pubKeyPath, _ = os.MkdirTemp("", "pubkey") + err = os.WriteFile(ts.pubKeyPath+"/c4gh.pub", []byte(pubKey), 0600) + assert.NoError(ts.T(), err) } func (ts *ConfigTestSuite) TearDownTest() { @@ -303,7 +309,7 @@ func (ts *ConfigTestSuite) TestSyncConfig() { viper.Set("sync.remote.password", "test") viper.Set("c4gh.filepath", "/keys/key") viper.Set("c4gh.passphrase", 
"pass") - viper.Set("c4gh.syncPubKeyPath", "/keys/recipient") + viper.Set("c4gh.syncPubKeyPath", ts.pubKeyPath+"/c4gh.pub") config, err = NewConfig("sync") assert.NotNil(ts.T(), config) assert.NoError(ts.T(), err) @@ -325,24 +331,52 @@ func (ts *ConfigTestSuite) TestSyncConfig() { assert.NotNil(ts.T(), config.Sync) assert.NotNil(ts.T(), config.Sync.Destination.Posix) assert.Equal(ts.T(), "test", config.Sync.Destination.Posix.Location) + + defer os.RemoveAll(ts.pubKeyPath) } -func (ts *ConfigTestSuite) TestGetC4GHPublicKey() { - pubKey := "-----BEGIN CRYPT4GH PUBLIC KEY-----\nuQO46R56f/Jx0YJjBAkZa2J6n72r6HW/JPMS4tfepBs=\n-----END CRYPT4GH PUBLIC KEY-----" - pubKeyPath, _ := os.MkdirTemp("", "pubkey") - err := os.WriteFile(pubKeyPath+"/c4gh.pub", []byte(pubKey), 0600) + +func (ts *ConfigTestSuite) TestRotateKeyConfig() { + ts.SetupTest() + // At this point we should fail because we lack configuration + config, err := NewConfig("rotatekey") + assert.Error(ts.T(), err) + assert.Nil(ts.T(), config) + + viper.Set("c4gh.rotatePubKeyPath", ts.pubKeyPath+"/c4gh.pub") + config, err = NewConfig("rotatekey") + assert.NotNil(ts.T(), config) assert.NoError(ts.T(), err) + assert.NotNil(ts.T(), config.Broker) + assert.Equal(ts.T(), "testhost", config.Broker.Host) + assert.Equal(ts.T(), 123, config.Broker.Port) + assert.Equal(ts.T(), "testuser", config.Broker.User) + assert.Equal(ts.T(), "testpassword", config.Broker.Password) + assert.Equal(ts.T(), "routingtest", config.Broker.RoutingKey) + assert.NotNil(ts.T(), config.Database) + assert.Equal(ts.T(), "test", config.Database.Host) + assert.Equal(ts.T(), 123, config.Database.Port) + assert.Equal(ts.T(), "test", config.Database.User) + assert.Equal(ts.T(), "test", config.Database.Password) + assert.Equal(ts.T(), "test", config.Database.Database) + assert.NotNil(ts.T(), config.RotateKey) + assert.NotNil(ts.T(), config.RotateKey.Grpc) + assert.Equal(ts.T(), "reencrypt", config.RotateKey.Grpc.Host) + defer os.RemoveAll(ts.pubKeyPath) 
+} + +func (ts *ConfigTestSuite) TestGetC4GHPublicKey() { var kb [32]byte k, _ := base64.StdEncoding.DecodeString("uQO46R56f/Jx0YJjBAkZa2J6n72r6HW/JPMS4tfepBs=") copy(kb[:], k) - viper.Set("c4gh.syncPubKeyPath", pubKeyPath+"/c4gh.pub") - pkBytes, err := GetC4GHPublicKey() + viper.Set("c4gh.syncPubKeyPath", ts.pubKeyPath+"/c4gh.pub") + pkBytes, err := GetC4GHPublicKey(ts.pubKeyPath + "/c4gh.pub") assert.NoError(ts.T(), err) assert.NotNil(ts.T(), pkBytes) assert.Equal(ts.T(), pkBytes, &kb, "GetC4GHPublicKey didn't return correct pubKey") - defer os.RemoveAll(pubKeyPath) + defer os.RemoveAll(ts.pubKeyPath) } func (ts *ConfigTestSuite) TestGetC4GHKey() { key := "-----BEGIN CRYPT4GH ENCRYPTED PRIVATE KEY-----\nYzRnaC12MQAGc2NyeXB0ABQAAAAAEna8op+BzhTVrqtO5Rx7OgARY2hhY2hhMjBfcG9seTEzMDUAPMx2Gbtxdva0M2B0tb205DJT9RzZmvy/9ZQGDx9zjlObj11JCqg57z60F0KhJW+j/fzWL57leTEcIffRTA==\n-----END CRYPT4GH ENCRYPTED PRIVATE KEY-----" diff --git a/sda/internal/database/database.go b/sda/internal/database/database.go index bb00a755f..6e94c69ff 100644 --- a/sda/internal/database/database.go +++ b/sda/internal/database/database.go @@ -61,6 +61,11 @@ type DatasetInfo struct { Timestamp string `json:"timeStamp"` } +type FileDetails struct { + User string + Path string +} + // SchemaName is the name of the remote database schema to query var SchemaName = "sda" @@ -196,7 +201,7 @@ func (dbs *SDAdb) checkAndReconnectIfNeeded() { } func (dbs *SDAdb) Reconnect() { - dbs.DB.Close() + _ = dbs.DB.Close() dbs.DB, _ = sql.Open(dbs.Config.PgDataSource()) } @@ -208,6 +213,6 @@ func (dbs *SDAdb) Close() { err := dbs.DB.Ping() if err == nil { log.Info("Closing database connection") - dbs.DB.Close() + _ = dbs.DB.Close() } } diff --git a/sda/internal/database/db_functions.go b/sda/internal/database/db_functions.go index 40372c8d9..99f754efc 100644 --- a/sda/internal/database/db_functions.go +++ b/sda/internal/database/db_functions.go @@ -19,48 +19,29 @@ import ( // RegisterFile inserts a file in the database, 
along with a "registered" log // event. If the file already exists in the database, the entry is updated, but // a new file event is always inserted. -func (dbs *SDAdb) RegisterFile(uploadPath, uploadUser string) (string, error) { +// If fileId is provided the new files table row will have that id, otherwise a new uuid will be generated +// If the unique unique_ingested constraint(submission_file_path, archive_file_path, submission_user) already exists +// and a different fileId is provided, the fileId in the database will NOT be updated. +func (dbs *SDAdb) RegisterFile(fileID *string, uploadPath, uploadUser string) (string, error) { dbs.checkAndReconnectIfNeeded() if dbs.Version < 4 { return "", errors.New("database schema v4 required for RegisterFile()") } - query := "SELECT sda.register_file($1, $2);" + query := "SELECT sda.register_file($1, $2, $3);" - var fileID string - - err := dbs.DB.QueryRow(query, uploadPath, uploadUser).Scan(&fileID) + var createdFileID string - return fileID, err -} - -func (dbs *SDAdb) GetFileID(corrID string) (string, error) { - var ( - err error - count int - ID string - ) - - for count == 0 || (err != nil && count < RetryTimes) { - ID, err = dbs.getFileID(corrID) - count++ + fileIDArg := sql.NullString{} + if fileID != nil { + fileIDArg.Valid = true + fileIDArg.String = *fileID } - return ID, err -} -func (dbs *SDAdb) getFileID(corrID string) (string, error) { - dbs.checkAndReconnectIfNeeded() - db := dbs.DB - const getFileID = "SELECT DISTINCT file_id FROM sda.file_event_log where correlation_id = $1;" - - var fileID string - err := db.QueryRow(getFileID, corrID).Scan(&fileID) - if err != nil { - return "", err - } + err := dbs.DB.QueryRow(query, fileIDArg, uploadPath, uploadUser).Scan(&createdFileID) - return fileID, nil + return createdFileID, err } // GetInboxFilePathFromID checks if a file exists in the database for a given user and fileID @@ -122,12 +103,17 @@ func (dbs *SDAdb) getFileIDByUserPathAndStatus(submissionUser, 
filePath, status dbs.checkAndReconnectIfNeeded() db := dbs.DB - const getFileID = `SELECT id from sda.files -WHERE submission_user=$1 and submission_file_path =$2 and stable_id IS null -AND EXISTS (SELECT 1 FROM -(SELECT event from sda.file_event_log JOIN sda.files ON sda.files.id=sda.file_event_log.file_id -WHERE submission_user=$1 and submission_file_path =$2 order by started_at desc limit 1) -AS subquery WHERE event = $3);` + const getFileID = ` +SELECT id_and_event.id +FROM ( + SELECT DISTINCT ON (f.id) f.id, fel.event FROM sda.files AS f + LEFT JOIN sda.file_event_log AS fel ON fel.file_id = f.id + WHERE f.submission_user = $1 + AND f.submission_file_path = $2 + AND f.stable_id IS NULL + ORDER BY f.id, fel.started_at DESC LIMIT 1 + ) AS id_and_event +WHERE id_and_event.event = $3;` var fileID string err := db.QueryRow(getFileID, submissionUser, filePath, status).Scan(&fileID) @@ -138,29 +124,79 @@ AS subquery WHERE event = $3);` return fileID, nil } +// CheckStableIDOwnedByUser checks if the file a stableID links to belongs to the user +// Returns true if a file is found by the stableID and user, false if not found +func (dbs *SDAdb) CheckStableIDOwnedByUser(stableID, user string) (bool, error) { + var ( + err error + found bool + ) + // 2, 4, 8, 16, 32 seconds between each retry event. 
+ for count := 1; count <= RetryTimes; count++ { + found, err = dbs.checkStableIDOwnedByUser(stableID, user) + if err == nil || strings.Contains(err.Error(), "sql: no rows in result set") { + break + } + time.Sleep(time.Duration(math.Pow(2, float64(count))) * time.Second) + } + + return found, err +} + +func (dbs *SDAdb) checkStableIDOwnedByUser(stableID, user string) (bool, error) { + dbs.checkAndReconnectIfNeeded() + db := dbs.DB + + const checkFileFound = ` +SELECT true +FROM sda.files +WHERE stable_id = $1 +AND submission_user = $2` + + var found bool + if err := db.QueryRow(checkFileFound, stableID, user).Scan(&found); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return false, nil + } + + return false, err + } + + return true, nil +} + // UpdateFileEventLog updates the status in of the file in the database. // The message parameter is the rabbitmq message sent on file upload. -func (dbs *SDAdb) UpdateFileEventLog(fileUUID, event, corrID, user, details, message string) error { +func (dbs *SDAdb) UpdateFileEventLog(fileUUID, event, user, details, message string) error { var ( err error count int ) for count == 0 || (err != nil && count < RetryTimes) { - err = dbs.updateFileEventLog(fileUUID, event, corrID, user, details, message) + err = dbs.updateFileEventLog(fileUUID, event, user, details, message) count++ } return err } -func (dbs *SDAdb) updateFileEventLog(fileUUID, event, corrID, user, details, message string) error { +func (dbs *SDAdb) updateFileEventLog(fileUUID, event, user, details, message string) error { dbs.checkAndReconnectIfNeeded() db := dbs.DB - const query = "INSERT INTO sda.file_event_log(file_id, event, correlation_id, user_id, details, message) VALUES($1, $2, $3, $4, $5, $6);" + const query = ` +INSERT INTO sda.file_event_log(file_id, event, user_id, details, message) +VALUES($1, $2, $3, $4, $5); +` - result, err := db.Exec(query, fileUUID, event, corrID, user, details, message) + result, err := db.Exec(query, fileUUID, event, user, 
details, message) if err != nil { + // 23503 error code == foreign_key_violation, meaning the files row does not exits + // http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html + if pqErr, ok := err.(*pq.Error); ok && pqErr.Code == "23503" { + return sql.ErrNoRows + } + return err } if rowsAffected, _ := result.RowsAffected(); rowsAffected == 0 { @@ -200,6 +236,39 @@ func (dbs *SDAdb) storeHeader(header []byte, id string) error { return nil } +// RotateHeader stores the file header in the database +func (dbs *SDAdb) RotateHeaderKey(header []byte, keyHash, fileID string) error { + var ( + err error + count int + ) + + for count == 0 || (err != nil && count < RetryTimes) { + err = dbs.rotateHeaderKey(header, keyHash, fileID) + count++ + } + + return err +} + +func (dbs *SDAdb) rotateHeaderKey(header []byte, keyHash, fileID string) error { + dbs.checkAndReconnectIfNeeded() + db := dbs.DB + + const query = "UPDATE sda.files SET header = $1, key_hash = $2 WHERE id = $3;" + + result, err := db.Exec(query, hex.EncodeToString(header), keyHash, fileID) + if err != nil { + return err + } + if rowsAffected, _ := result.RowsAffected(); rowsAffected == 0 { + return errors.New("something went wrong with the query zero rows were changed") + } + log.Debugf("Successfully set header and key hash for file %s", fileID) + + return nil +} + // SetArchived marks the file as 'ARCHIVED' func (dbs *SDAdb) SetArchived(file FileInfo, fileID string) error { var err error @@ -235,7 +304,7 @@ ON CONFLICT ON CONSTRAINT unique_checksum DO UPDATE SET checksum = EXCLUDED.chec return nil } -func (dbs *SDAdb) GetFileStatus(corrID string) (string, error) { +func (dbs *SDAdb) GetFileStatus(fileID string) (string, error) { var ( err error count int @@ -243,19 +312,19 @@ func (dbs *SDAdb) GetFileStatus(corrID string) (string, error) { ) for count == 0 || (err != nil && count < RetryTimes) { - status, err = dbs.getFileStatus(corrID) + status, err = dbs.getFileStatus(fileID) count++ } return 
status, err } -func (dbs *SDAdb) getFileStatus(corrID string) (string, error) { +func (dbs *SDAdb) getFileStatus(fileID string) (string, error) { dbs.checkAndReconnectIfNeeded() db := dbs.DB - const getFileID = "SELECT event from sda.file_event_log WHERE correlation_id = $1 ORDER BY id DESC LIMIT 1;" + const getFileID = "SELECT event from sda.file_event_log WHERE file_id = $1 ORDER BY id DESC LIMIT 1;" var status string - err := db.QueryRow(getFileID, corrID).Scan(&status) + err := db.QueryRow(getFileID, fileID).Scan(&status) if err != nil { return "", err } @@ -339,7 +408,7 @@ ON CONFLICT ON CONSTRAINT unique_checksum DO UPDATE SET checksum = EXCLUDED.chec } // GetArchived retrieves the location and size of archive -func (dbs *SDAdb) GetArchived(corrID string) (string, int, error) { +func (dbs *SDAdb) GetArchived(fileID string) (string, int, error) { var ( filePath string fileSize int @@ -348,13 +417,13 @@ func (dbs *SDAdb) GetArchived(corrID string) (string, int, error) { ) for count == 0 || (err != nil && count < RetryTimes) { - filePath, fileSize, err = dbs.getArchived(corrID) + filePath, fileSize, err = dbs.getArchived(fileID) count++ } return filePath, fileSize, err } -func (dbs *SDAdb) getArchived(corrID string) (string, int, error) { +func (dbs *SDAdb) getArchived(fileID string) (string, int, error) { dbs.checkAndReconnectIfNeeded() db := dbs.DB @@ -362,7 +431,7 @@ func (dbs *SDAdb) getArchived(corrID string) (string, int, error) { var filePath string var fileSize int - if err := db.QueryRow(query, corrID).Scan(&filePath, &fileSize); err != nil { + if err := db.QueryRow(query, fileID).Scan(&filePath, &fileSize); err != nil { return "", 0, err } @@ -442,6 +511,37 @@ func (dbs *SDAdb) setAccessionID(accessionID, fileID string) error { return nil } +// GetAccessionID returns the stable id of a file identified by its file_id +func (dbs *SDAdb) GetAccessionID(fileID string) (string, error) { + var ( + aID string + err error + ) + // 2, 4, 8, 16, 32 seconds 
between each retry event. + for count := 1; count <= RetryTimes; count++ { + aID, err = dbs.getAccessionID(fileID) + if err == nil { + break + } + time.Sleep(time.Duration(math.Pow(2, float64(count))) * time.Second) + } + + return aID, err +} +func (dbs *SDAdb) getAccessionID(fileID string) (string, error) { + dbs.checkAndReconnectIfNeeded() + db := dbs.DB + + const getAccessionID = "SELECT stable_id FROM sda.files WHERE id = $1;" + var aID string + err := db.QueryRow(getAccessionID, fileID).Scan(&aID) + if err != nil { + return "", err + } + + return aID, nil +} + // MapFilesToDataset maps a set of files to a dataset in the database func (dbs *SDAdb) MapFilesToDataset(datasetID string, accessionIDs []string) error { var err error @@ -766,14 +866,27 @@ func (dbs *SDAdb) getUserFiles(userID, pathPrefix string, allData bool) ([]*Subm db := dbs.DB // select all files (that are not part of a dataset) of the user, each one annotated with its latest event - const query = `SELECT f.id, f.submission_file_path, f.stable_id, e.event, f.created_at FROM sda.files f -LEFT JOIN (SELECT DISTINCT ON (file_id) file_id, started_at, event FROM sda.file_event_log ORDER BY file_id, started_at DESC) e ON f.id = e.file_id -WHERE f.submission_user = $1 and f.submission_file_path LIKE $2 -AND NOT EXISTS (SELECT 1 FROM sda.file_dataset d WHERE f.id = d.file_id);` + const query = ` +SELECT DISTINCT ON (f.id) f.id, f.submission_file_path, f.stable_id, fel.event, f.created_at FROM sda.files AS f + LEFT JOIN sda.file_event_log AS fel ON fel.file_id = f.id + LEFT JOIN sda.file_dataset AS fd ON fd.file_id = f.id +WHERE f.submission_user = $1 AND ($2::TEXT IS NULL OR substr(f.submission_file_path, 1, $3) = $2::TEXT) + AND fd.file_id IS NULL +ORDER BY f.id, fel.started_at DESC;` + + pathPrefixLen := 1 + pathPrefixArg := sql.NullString{} + if pathPrefix != "" { + pathPrefixLen = len(pathPrefix) + pathPrefixArg.Valid = true + pathPrefixArg.String = pathPrefix + } // nolint:rowserrcheck - rows, err := 
db.Query(query, userID, fmt.Sprintf("%s%%", pathPrefix)) + rows, err := db.Query(query, userID, pathPrefixArg, pathPrefixLen) if err != nil { + log.Errorf("Error querying user files: %v", err) + return nil, err } defer rows.Close() @@ -801,53 +914,6 @@ AND NOT EXISTS (SELECT 1 FROM sda.file_dataset d WHERE f.id = d.file_id);` return files, nil } -// get the correlation ID for a user-inbox_path combination -func (dbs *SDAdb) GetCorrID(user, path, accession string) (string, error) { - var ( - corrID string - err error - ) - // 2, 4, 8, 16, 32 seconds between each retry event. - for count := 1; count <= RetryTimes; count++ { - corrID, err = dbs.getCorrID(user, path, accession) - if err == nil || strings.Contains(err.Error(), "sql: no rows in result set") { - break - } - time.Sleep(time.Duration(math.Pow(2, float64(count))) * time.Second) - } - - return corrID, err -} -func (dbs *SDAdb) getCorrID(user, path, accession string) (string, error) { - dbs.checkAndReconnectIfNeeded() - db := dbs.DB - const query = `SELECT DISTINCT correlation_id FROM sda.file_event_log e -RIGHT JOIN sda.files f ON e.file_id = f.id -WHERE f.submission_file_path = $1 AND f.submission_user = $2 AND COALESCE(f.stable_id, '') = $3;` - - rows, err := db.Query(query, path, user, accession) - if err != nil { - return "", err - } - defer rows.Close() - - var corrID sql.NullString - for rows.Next() { - err := rows.Scan(&corrID) - if err != nil { - return "", err - } - if corrID.Valid { - return corrID.String, nil - } - } - if rows.Err() != nil { - return "", rows.Err() - } - - return "", errors.New("sql: no rows in result set") -} - // list all users with files not yet assigned to a dataset func (dbs *SDAdb) ListActiveUsers() ([]string, error) { dbs.checkAndReconnectIfNeeded() @@ -937,6 +1003,38 @@ func (dbs *SDAdb) addKeyHash(keyHash, keyDescription string) error { return nil } +// GetKeyHash wraps getKeyHash with exponential stand-off retries +func (dbs *SDAdb) GetKeyHash(fileID string) (string, 
error) { + var ( + keyHash string + err error + ) + // 2, 4, 8, 16, 32 seconds between each retry event. + for count := 1; count <= RetryTimes; count++ { + keyHash, err = dbs.getKeyHash(fileID) + if err == nil || strings.Contains(err.Error(), "sql: no rows in result set") { + break + } + time.Sleep(time.Duration(math.Pow(2, float64(count))) * time.Second) + } + + return keyHash, err +} + +// getKeyHash gets the c4gh key hash corresponding to the fileID in the files table +func (dbs *SDAdb) getKeyHash(fileID string) (string, error) { + dbs.checkAndReconnectIfNeeded() + db := dbs.DB + + const query = "SELECT key_hash from sda.files WHERE id = $1;" + var keyHash string + err := db.QueryRow(query, fileID).Scan(&keyHash) + if err != nil { + return "", err + } + + return keyHash, nil +} func (dbs *SDAdb) SetKeyHash(keyHash, fileID string) error { dbs.checkAndReconnectIfNeeded() db := dbs.DB @@ -1010,6 +1108,26 @@ func (dbs *SDAdb) DeprecateKeyHash(keyHash string) error { return nil } +// Check that a key hash exists in the database +func (dbs *SDAdb) CheckKeyHash(keyhash string) error { + hashes, err := dbs.ListKeyHashes() + if err != nil { + return err + } + + for n := range hashes { + if hashes[n].Hash == keyhash && hashes[n].DeprecatedAt == "" { + return nil + } + + if hashes[n].Hash == keyhash && hashes[n].DeprecatedAt != "" { + return errors.New("the c4gh key hash has been deprecated") + } + } + + return errors.New("the c4gh key hash is not registered") +} + // ListDatasets lists all datasets as well as the status func (dbs *SDAdb) ListDatasets() ([]*DatasetInfo, error) { dbs.checkAndReconnectIfNeeded() @@ -1033,7 +1151,7 @@ func (dbs *SDAdb) ListDatasets() ([]*DatasetInfo, error) { datasets = append(datasets, &di) } - rows.Close() + _ = rows.Close() return datasets, nil } @@ -1075,7 +1193,7 @@ func (dbs *SDAdb) ListUserDatasets(submissionUser string) ([]DatasetInfo, error) datasets = append(datasets, di) } - rows.Close() + _ = rows.Close() return datasets, nil } @@ 
-1136,6 +1254,30 @@ func (dbs *SDAdb) GetReVerificationData(accessionID string) (schema.IngestionVer return reVerify, nil } +func (dbs *SDAdb) GetReVerificationDataFromFileID(fileID string) (schema.IngestionVerification, error) { + dbs.checkAndReconnectIfNeeded() + db := dbs.DB + reVerify := schema.IngestionVerification{ReVerify: true, FileID: fileID} + + const query = "SELECT archive_file_path,submission_file_path,submission_user FROM sda.files where id = $1;" + err := db.QueryRow(query, fileID).Scan(&reVerify.ArchivePath, &reVerify.FilePath, &reVerify.User) + if err != nil { + return schema.IngestionVerification{}, err + } + + var checksum schema.Checksums + const archiveChecksum = "SELECT type,checksum from sda.checksums WHERE file_id = $1 AND source = 'ARCHIVED';" + if err := db.QueryRow(archiveChecksum, reVerify.FileID).Scan(&checksum.Type, &checksum.Value); err != nil { + log.Errorln(err.Error()) + + return schema.IngestionVerification{}, err + } + checksum.Type = strings.ToLower(checksum.Type) + reVerify.EncryptedChecksums = append(reVerify.EncryptedChecksums, checksum) + + return reVerify, nil +} + func (dbs *SDAdb) GetDecryptedChecksum(id string) (string, error) { dbs.checkAndReconnectIfNeeded() db := dbs.DB @@ -1174,3 +1316,37 @@ func (dbs *SDAdb) GetDatasetFiles(dataset string) ([]string, error) { return accessions, nil } + +// GetFileDetailsFromUUID() retrieves user, path and correlation id by giving the file UUID +func (dbs *SDAdb) GetFileDetailsFromUUID(fileUUID, event string) (FileDetails, error) { + var ( + info FileDetails + err error + ) + + for count := 0; count <= RetryTimes; count++ { + info, err = dbs.getFileDetailsFromUUID(fileUUID, event) + if err == nil { + break + } + time.Sleep(time.Duration(math.Pow(2, float64(count))) * time.Second) + } + + return info, err +} + +// getFileDetailsFromUUID() is the actual function performing work for GetUserAndPathFromUUID +func (dbs *SDAdb) getFileDetailsFromUUID(fileUUID, event string) (FileDetails, 
error) { + var info FileDetails + dbs.checkAndReconnectIfNeeded() + + const query = `SELECT f.submission_user, f.submission_file_path + from sda.files f + join sda.file_event_log fel on f.id = fel.file_id + WHERE f.id = $1 and fel.event=$2;` + if err := dbs.DB.QueryRow(query, fileUUID, event).Scan(&info.User, &info.Path); err != nil { + return FileDetails{}, err + } + + return info, nil +} diff --git a/sda/internal/database/db_functions_test.go b/sda/internal/database/db_functions_test.go index bbdbc7357..877f06fc0 100644 --- a/sda/internal/database/db_functions_test.go +++ b/sda/internal/database/db_functions_test.go @@ -2,6 +2,7 @@ package database import ( "crypto/sha256" + "encoding/hex" "fmt" "regexp" "time" @@ -19,7 +20,7 @@ func (suite *DatabaseTests) TestRegisterFile() { assert.NoError(suite.T(), err, "got %v when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/file1.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/file1.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") // check that the returning fileID is a uuid @@ -37,22 +38,27 @@ func (suite *DatabaseTests) TestRegisterFile() { err = db.DB.QueryRow("SELECT EXISTS(SELECT 1 FROM sda.file_event_log WHERE file_id=$1 AND event='registered')", fileID).Scan(&exists) assert.NoError(suite.T(), err, "Failed to check if registered file event exists") assert.True(suite.T(), exists, "RegisterFile() did not insert a row into sda.file_event_log with id: "+fileID) + + db.Close() } -func (suite *DatabaseTests) TestGetFileID() { +func (suite *DatabaseTests) TestRegisterFileWithID() { db, err := NewSDAdb(suite.dbConf) assert.NoError(suite.T(), err, "got %v when creating new connection", err) - fileID, err := db.RegisterFile("/testuser/file3.c4gh", "testuser") + insertedFileID := uuid.New().String() + fileID, err := db.RegisterFile(&insertedFileID, "/testuser/file3.c4gh", "testuser") 
assert.NoError(suite.T(), err, "failed to register file in database") - corrID := uuid.New().String() - err = db.UpdateFileEventLog(fileID, "uploaded", corrID, "testuser", "{}", "{}") + err = db.UpdateFileEventLog(fileID, "uploaded", "testuser", "{}", "{}") assert.NoError(suite.T(), err, "failed to update file status") - fID, err := db.GetFileID(corrID) + fID, err := db.GetFileIDByUserPathAndStatus("testuser", "/testuser/file3.c4gh", "uploaded") assert.NoError(suite.T(), err, "GetFileId failed") + assert.Equal(suite.T(), insertedFileID, fileID) assert.Equal(suite.T(), fileID, fID) + + db.Close() } func (suite *DatabaseTests) TestUpdateFileEventLog() { @@ -60,16 +66,15 @@ func (suite *DatabaseTests) TestUpdateFileEventLog() { assert.NoError(suite.T(), err, "got %v when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/file4.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/file4.c4gh", "testuser") assert.Nil(suite.T(), err, "failed to register file in database") - corrID := uuid.New().String() // Attempt to mark a file that doesn't exist as uploaded - err = db.UpdateFileEventLog("00000000-0000-0000-0000-000000000000", "uploaded", corrID, "testuser", "{}", "{}") + err = db.UpdateFileEventLog("00000000-0000-0000-0000-000000000000", "uploaded", "testuser", "{}", "{}") assert.NotNil(suite.T(), err, "Unknown file could be marked as uploaded in database") // mark file as uploaded - err = db.UpdateFileEventLog(fileID, "uploaded", corrID, "testuser", "{}", "{}") + err = db.UpdateFileEventLog(fileID, "uploaded", "testuser", "{}", "{}") assert.NoError(suite.T(), err, "failed to set file as uploaded in database") exists := false @@ -77,6 +82,8 @@ func (suite *DatabaseTests) TestUpdateFileEventLog() { err = db.DB.QueryRow("SELECT EXISTS(SELECT 1 FROM sda.file_event_log WHERE file_id=$1 AND event='uploaded')", fileID).Scan(&exists) assert.NoError(suite.T(), err, "Failed to check if uploaded file event 
exists") assert.True(suite.T(), exists, "UpdateFileEventLog() did not insert a row into sda.file_event_log with id: "+fileID) + + db.Close() } func (suite *DatabaseTests) TestStoreHeader() { @@ -84,7 +91,7 @@ func (suite *DatabaseTests) TestStoreHeader() { assert.NoError(suite.T(), err, "got %v when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/TestStoreHeader.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestStoreHeader.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") err = db.StoreHeader([]byte{15, 45, 20, 40, 48}, fileID) @@ -93,6 +100,56 @@ func (suite *DatabaseTests) TestStoreHeader() { // store header for non existing entry err = db.StoreHeader([]byte{15, 45, 20, 40, 48}, "00000000-0000-0000-0000-000000000000") assert.EqualError(suite.T(), err, "something went wrong with the query zero rows were changed") + + db.Close() +} + +func (suite *DatabaseTests) TestRotateHeaderKey() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got %v when creating new connection", err) + + // Register a new key and a new file + fileID, err := db.RegisterFile(nil, "/testuser/file1.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + err = db.addKeyHash("someKeyHash", "this is a test key") + assert.NoError(suite.T(), err, "failed to register key in database") + err = db.StoreHeader([]byte{15, 45, 20, 40, 48}, fileID) + assert.NoError(suite.T(), err, "failed to store file header") + + // test happy path + newKeyHex := `6af1407abc74656b8913a7d323c4bfd30bf7c8ca359f74ae35357acef29dc507` + err = db.addKeyHash(newKeyHex, "new key") + assert.NoError(suite.T(), err, "failed to register key in database") + newHHeader := []byte{1, 2, 3} + + err = db.RotateHeaderKey(newHHeader, newKeyHex, fileID) + assert.NoError(suite.T(), err) + + // Verify that the key+header were updated + var dbHeaderString, dbKeyHash 
string + err = db.DB.QueryRow("SELECT header, key_hash FROM sda.files WHERE id=$1", fileID).Scan(&dbHeaderString, &dbKeyHash) + assert.NoError(suite.T(), err) + dbHeader, err := hex.DecodeString(dbHeaderString) + assert.NoError(suite.T(), err, "hex decoding of rotated header failed") + assert.Equal(suite.T(), newHHeader, dbHeader) + assert.Equal(suite.T(), newKeyHex, dbKeyHash) + + // case of non registered keyhash + err = db.RotateHeaderKey([]byte{2, 4, 6, 8}, "unknownKeyHash", fileID) + assert.ErrorContains(suite.T(), err, "violates foreign key constraint") + // check that no column was updated + err = db.DB.QueryRow("SELECT header, key_hash FROM sda.files WHERE id=$1", fileID).Scan(&dbHeaderString, &dbKeyHash) + assert.NoError(suite.T(), err) + dbHeader, err = hex.DecodeString(dbHeaderString) + assert.NoError(suite.T(), err, "hex decoding of rotated header failed") + assert.Equal(suite.T(), newHHeader, dbHeader) + assert.Equal(suite.T(), newKeyHex, dbKeyHash) + + // case of non existing entry + err = db.RotateHeaderKey([]byte{15, 45, 20, 40, 48}, "keyHex", "00000000-0000-0000-0000-000000000000") + assert.EqualError(suite.T(), err, "something went wrong with the query zero rows were changed") + + db.Close() } func (suite *DatabaseTests) TestSetArchived() { @@ -100,7 +157,7 @@ func (suite *DatabaseTests) TestSetArchived() { assert.NoError(suite.T(), err, "got %v when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/TestSetArchived.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestSetArchived.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") fileInfo := FileInfo{fmt.Sprintf("%x", sha256.New()), 1000, "/tmp/TestSetArchived.c4gh", fmt.Sprintf("%x", sha256.New()), -1, fmt.Sprintf("%x", sha256.New())} @@ -112,6 +169,8 @@ func (suite *DatabaseTests) TestSetArchived() { err = db.SetArchived(fileInfo, fileID) assert.NoError(suite.T(), err, "failed to 
mark file as Archived") + + db.Close() } func (suite *DatabaseTests) TestGetFileStatus() { @@ -119,16 +178,17 @@ func (suite *DatabaseTests) TestGetFileStatus() { assert.NoError(suite.T(), err, "got %v when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/TestGetFileStatus.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestGetFileStatus.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") - corrID := uuid.New().String() - err = db.UpdateFileEventLog(fileID, "downloaded", corrID, "testuser", "{}", "{}") + err = db.UpdateFileEventLog(fileID, "downloaded", "testuser", "{}", "{}") assert.NoError(suite.T(), err, "failed to set file as downloaded in database") - status, err := db.GetFileStatus(corrID) + status, err := db.GetFileStatus(fileID) assert.NoError(suite.T(), err, "failed to get file status") assert.Equal(suite.T(), "downloaded", status) + + db.Close() } func (suite *DatabaseTests) TestGetHeader() { @@ -136,7 +196,7 @@ func (suite *DatabaseTests) TestGetHeader() { assert.NoError(suite.T(), err, "got %v when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/TestGetHeader.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestGetHeader.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") err = db.StoreHeader([]byte{15, 45, 20, 40, 48}, fileID) @@ -145,6 +205,8 @@ func (suite *DatabaseTests) TestGetHeader() { header, err := db.GetHeader(fileID) assert.NoError(suite.T(), err, "failed to get file header") assert.Equal(suite.T(), []byte{15, 45, 20, 40, 48}, header) + + db.Close() } func (suite *DatabaseTests) TestSetVerified() { @@ -152,7 +214,7 @@ func (suite *DatabaseTests) TestSetVerified() { assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) // register a file in the database - fileID, err := 
db.RegisterFile("/testuser/TestSetVerified.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestSetVerified.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") fileInfo := FileInfo{fmt.Sprintf("%x", sha256.New()), 1000, "/testuser/TestSetVerified.c4gh", fmt.Sprintf("%x", sha256.New()), 948, fmt.Sprintf("%x", sha256.New())} @@ -161,6 +223,8 @@ func (suite *DatabaseTests) TestSetVerified() { err = db.SetVerified(fileInfo, fileID) assert.NoError(suite.T(), err, "got (%v) when marking file as verified", err) + + db.Close() } func (suite *DatabaseTests) TestGetArchived() { @@ -168,7 +232,7 @@ func (suite *DatabaseTests) TestGetArchived() { assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/TestGetArchived.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestGetArchived.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") fileInfo := FileInfo{fmt.Sprintf("%x", sha256.New()), 1000, "/tmp/TestGetArchived.c4gh", fmt.Sprintf("%x", sha256.New()), 987, fmt.Sprintf("%x", sha256.New())} @@ -182,6 +246,8 @@ func (suite *DatabaseTests) TestGetArchived() { assert.NoError(suite.T(), err, "got (%v) when getting file archive information", err) assert.Equal(suite.T(), 1000, fileSize) assert.Equal(suite.T(), "/tmp/TestGetArchived.c4gh", filePath) + + db.Close() } func (suite *DatabaseTests) TestSetAccessionID() { @@ -189,7 +255,7 @@ func (suite *DatabaseTests) TestSetAccessionID() { assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/TestSetAccessionID.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestSetAccessionID.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") fileInfo := FileInfo{fmt.Sprintf("%x", 
sha256.New()), 1000, "/tmp/TestSetAccessionID.c4gh", fmt.Sprintf("%x", sha256.New()), 987, fmt.Sprintf("%x", sha256.New())} @@ -200,6 +266,8 @@ func (suite *DatabaseTests) TestSetAccessionID() { stableID := "TEST:000-1234-4567" err = db.SetAccessionID(stableID, fileID) assert.NoError(suite.T(), err, "got (%v) when getting file archive information", err) + + db.Close() } func (suite *DatabaseTests) TestCheckAccessionIDExists() { @@ -207,7 +275,7 @@ func (suite *DatabaseTests) TestCheckAccessionIDExists() { assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/TestCheckAccessionIDExists.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestCheckAccessionIDExists.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") fileInfo := FileInfo{fmt.Sprintf("%x", sha256.New()), 1000, "/tmp/TestCheckAccessionIDExists.c4gh", fmt.Sprintf("%x", sha256.New()), 987, fmt.Sprintf("%x", sha256.New())} @@ -226,6 +294,63 @@ func (suite *DatabaseTests) TestCheckAccessionIDExists() { duplicate, err := db.CheckAccessionIDExists(stableID, uuid.New().String()) assert.NoError(suite.T(), err, "got (%v) when getting file archive information", err) assert.Equal(suite.T(), "duplicate", duplicate) + + db.Close() +} + +func (suite *DatabaseTests) TestGetAccessionID() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + + // register a file in the database + fileID, err := db.RegisterFile(nil, "/testuser/TestSetAccessionID.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + fileInfo := FileInfo{fmt.Sprintf("%x", sha256.New()), 1000, "/tmp/TestSetAccessionID.c4gh", fmt.Sprintf("%x", sha256.New()), 987, fmt.Sprintf("%x", sha256.New())} + + err = db.SetArchived(fileInfo, fileID) + assert.NoError(suite.T(), err, "got (%v) when marking file as 
Archived") + err = db.SetVerified(fileInfo, fileID) + assert.NoError(suite.T(), err, "got (%v) when marking file as verified", err) + stableID := "TEST:000-1234-4567" + err = db.SetAccessionID(stableID, fileID) + assert.NoError(suite.T(), err, "got (%v) when getting file archive information", err) + + res, err := db.GetAccessionID(fileID) + assert.NoError(suite.T(), err, "got (%v) when getting accessionID of file", err) + assert.Equal(suite.T(), stableID, res, "retrieved accessionID is wrong") + + db.Close() +} + +func (suite *DatabaseTests) TestGetAccessionID_wrongFileID() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + + // register a file in the database + fileID, err := db.RegisterFile(nil, "/testuser/TestSetAccessionID.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + fileInfo := FileInfo{fmt.Sprintf("%x", sha256.New()), 1000, "/tmp/TestSetAccessionID.c4gh", fmt.Sprintf("%x", sha256.New()), 987, fmt.Sprintf("%x", sha256.New())} + + err = db.SetArchived(fileInfo, fileID) + assert.NoError(suite.T(), err, "got (%v) when marking file as Archived") + err = db.SetVerified(fileInfo, fileID) + assert.NoError(suite.T(), err, "got (%v) when marking file as verified", err) + stableID := "TEST:000-1234-4567" + err = db.SetAccessionID(stableID, fileID) + assert.NoError(suite.T(), err, "got (%v) when getting file archive information", err) + + // locally reduce RetryTimes to avoid 30s waiting limit of testsuite + RetryTimes = 2 + + // check for bad format + _, err = db.GetAccessionID("someFileID") + assert.ErrorContains(suite.T(), err, "invalid input syntax for type uuid") + + // check for non-existent fileID + _, err = db.GetAccessionID(uuid.New().String()) + assert.ErrorContains(suite.T(), err, "no rows in result set") + + db.Close() } func (suite *DatabaseTests) TestGetFileInfo() { @@ -233,7 +358,7 @@ func (suite *DatabaseTests) TestGetFileInfo() { 
assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/TestGetFileInfo.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestGetFileInfo.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") encSha := sha256.New() @@ -257,6 +382,8 @@ func (suite *DatabaseTests) TestGetFileInfo() { assert.Equal(suite.T(), "/tmp/TestGetFileInfo.c4gh", info.Path) assert.Equal(suite.T(), "11c94bc7fb13afeb2b3fb16c1dbe9206dc09560f1b31420f2d46210ca4ded0a8", info.ArchiveChecksum) assert.Equal(suite.T(), "a671218c2418aa51adf97e33c5c91a720289ba3c9fd0d36f6f4bf9610730749f", info.DecryptedChecksum) + + db.Close() } func (suite *DatabaseTests) TestMapFilesToDataset() { @@ -265,7 +392,7 @@ func (suite *DatabaseTests) TestMapFilesToDataset() { accessions := []string{} for i := 1; i < 12; i++ { - fileID, err := db.RegisterFile(fmt.Sprintf("/testuser/TestMapFilesToDataset-%d.c4gh", i), "testuser") + fileID, err := db.RegisterFile(nil, fmt.Sprintf("/testuser/TestMapFilesToDataset-%d.c4gh", i), "testuser") assert.NoError(suite.T(), err, "failed to register file in database") err = db.SetAccessionID(fmt.Sprintf("acession-%d", i), fileID) @@ -294,6 +421,8 @@ func (suite *DatabaseTests) TestMapFilesToDataset() { suite.FailNow("failed to get dataset members from database") } assert.Equal(suite.T(), 5, dsMembers) + + db.Close() } func (suite *DatabaseTests) TestGetInboxPath() { @@ -302,7 +431,7 @@ func (suite *DatabaseTests) TestGetInboxPath() { accessions := []string{} for i := 0; i < 5; i++ { - fileID, err := db.RegisterFile(fmt.Sprintf("/testuser/TestGetInboxPath-00%d.c4gh", i), "testuser") + fileID, err := db.RegisterFile(nil, fmt.Sprintf("/testuser/TestGetInboxPath-00%d.c4gh", i), "testuser") assert.NoError(suite.T(), err, "failed to register file in database") err = db.SetAccessionID(fmt.Sprintf("acession-00%d", i), fileID) @@ -316,6 +445,8 @@ func 
(suite *DatabaseTests) TestGetInboxPath() { assert.NoError(suite.T(), err, "getInboxPath failed") assert.Contains(suite.T(), path, "/testuser/TestGetInboxPath") } + + db.Close() } func (suite *DatabaseTests) TestUpdateDatasetEvent() { @@ -324,7 +455,7 @@ func (suite *DatabaseTests) TestUpdateDatasetEvent() { accessions := []string{} for i := 0; i < 5; i++ { - fileID, err := db.RegisterFile(fmt.Sprintf("/testuser/TestGetInboxPath-00%d.c4gh", i), "testuser") + fileID, err := db.RegisterFile(nil, fmt.Sprintf("/testuser/TestGetInboxPath-00%d.c4gh", i), "testuser") assert.NoError(suite.T(), err, "failed to register file in database") err = db.SetAccessionID(fmt.Sprintf("acession-00%d", i), fileID) @@ -349,6 +480,8 @@ func (suite *DatabaseTests) TestUpdateDatasetEvent() { err = db.UpdateDatasetEvent(dID, "deprecated", "{\"type\": \"deprecate\"}") assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + + db.Close() } func (suite *DatabaseTests) TestGetHeaderForStableID() { @@ -356,7 +489,7 @@ func (suite *DatabaseTests) TestGetHeaderForStableID() { assert.NoError(suite.T(), err, "got %v when creating new connection", err) // register a file in the database - fileID, err := db.RegisterFile("/testuser/TestGetHeaderForStableID.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestGetHeaderForStableID.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") err = db.StoreHeader([]byte("HEADER"), fileID) @@ -369,6 +502,8 @@ func (suite *DatabaseTests) TestGetHeaderForStableID() { header, err := db.GetHeaderForStableID("TEST:010-1234-4567") assert.NoError(suite.T(), err, "failed to get header for stable ID: %v", err) assert.Equal(suite.T(), header, []byte("HEADER"), "did not get expected header") + + db.Close() } func (suite *DatabaseTests) TestGetSyncData() { @@ -376,7 +511,7 @@ func (suite *DatabaseTests) TestGetSyncData() { assert.NoError(suite.T(), err, "got %v when creating new connection", err) // 
register a file in the database - fileID, err := db.RegisterFile("/testuser/TestGetGetSyncData.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestGetGetSyncData.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") checksum := fmt.Sprintf("%x", sha256.New().Sum(nil)) @@ -397,6 +532,8 @@ func (suite *DatabaseTests) TestGetSyncData() { assert.Equal(suite.T(), "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", fileData.Checksum, "did not get expected file checksum") assert.Equal(suite.T(), "/testuser/TestGetGetSyncData.c4gh", fileData.FilePath, "did not get expected file path") assert.Equal(suite.T(), "testuser", fileData.User, "did not get expected user") + + db.Close() } func (suite *DatabaseTests) TestCheckIfDatasetExists() { @@ -405,7 +542,7 @@ func (suite *DatabaseTests) TestCheckIfDatasetExists() { accessions := []string{} for i := 0; i <= 3; i++ { - fileID, err := db.RegisterFile(fmt.Sprintf("/testuser/TestCheckIfDatasetExists-%d.c4gh", i), "testuser") + fileID, err := db.RegisterFile(nil, fmt.Sprintf("/testuser/TestCheckIfDatasetExists-%d.c4gh", i), "testuser") assert.NoError(suite.T(), err, "failed to register file in database") err = db.SetAccessionID(fmt.Sprintf("accession-%d", i), fileID) @@ -430,13 +567,15 @@ func (suite *DatabaseTests) TestCheckIfDatasetExists() { ok, err = db.checkIfDatasetExists("missing dataset") assert.NoError(suite.T(), err, "check if dataset exists failed") assert.Equal(suite.T(), ok, false) + + db.Close() } func (suite *DatabaseTests) TestGetArchivePath() { db, err := NewSDAdb(suite.dbConf) assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) - fileID, err := db.RegisterFile("/testuser/TestGetArchivePath-001.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestGetArchivePath-001.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") checksum := fmt.Sprintf("%x", sha256.New()) @@ 
-451,6 +590,8 @@ func (suite *DatabaseTests) TestGetArchivePath() { path, err := db.getArchivePath("acession-0001") assert.NoError(suite.T(), err, "getArchivePath failed") assert.Equal(suite.T(), path, corrID) + + db.Close() } func (suite *DatabaseTests) TestGetUserFiles() { @@ -465,13 +606,13 @@ func (suite *DatabaseTests) TestGetUserFiles() { sub = "submission_b" } - fileID, err := db.RegisterFile(fmt.Sprintf("%v/%s/TestGetUserFiles-00%d.c4gh", testUser, sub, i), testUser) + fileID, err := db.RegisterFile(nil, fmt.Sprintf("%v/%s/TestGetUserFiles-00%d.c4gh", testUser, sub, i), testUser) assert.NoError(suite.T(), err, "failed to register file in database") - err = db.UpdateFileEventLog(fileID, "uploaded", fileID, testUser, "{}", "{}") + err = db.UpdateFileEventLog(fileID, "uploaded", testUser, "{}", "{}") assert.NoError(suite.T(), err, "failed to update satus of file in database") err = db.SetAccessionID(fmt.Sprintf("stableID-00%d", i), fileID) assert.NoError(suite.T(), err, "failed to update satus of file in database") - err = db.UpdateFileEventLog(fileID, "ready", fileID, testUser, "{}", "{}") + err = db.UpdateFileEventLog(fileID, "ready", testUser, "{}", "{}") assert.NoError(suite.T(), err, "failed to update satus of file in database") } filelist, err := db.GetUserFiles("unknownuser", "", true) @@ -490,23 +631,8 @@ func (suite *DatabaseTests) TestGetUserFiles() { filteredFilelist, err := db.GetUserFiles(testUser, fmt.Sprintf("%s/submission_b", testUser), true) assert.NoError(suite.T(), err, "failed to get file list") assert.Equal(suite.T(), 3, len(filteredFilelist), "file list is of incorrect length") -} - -func (suite *DatabaseTests) TestGetCorrID() { - db, err := NewSDAdb(suite.dbConf) - assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) - filePath := "/testuser/file10.c4gh" - user := "testuser" - - fileID, err := db.RegisterFile(filePath, user) - assert.NoError(suite.T(), err, "failed to register file in database") - err = 
db.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") - assert.NoError(suite.T(), err, "failed to update satus of file in database") - - corrID, err := db.GetCorrID(user, filePath, "") - assert.NoError(suite.T(), err, "failed to get correlation ID of file in database") - assert.Equal(suite.T(), fileID, corrID) + db.Close() } func (suite *DatabaseTests) TestGetCorrID_sameFilePath() { @@ -516,11 +642,11 @@ func (suite *DatabaseTests) TestGetCorrID_sameFilePath() { filePath := "/testuser/file10.c4gh" user := "testuser" - fileID, err := db.RegisterFile(filePath, user) + fileID, err := db.RegisterFile(nil, filePath, user) if err != nil { suite.FailNow("failed to register file in database") } - if err := db.UpdateFileEventLog(fileID, "archived", fileID, user, "{}", "{}"); err != nil { + if err := db.UpdateFileEventLog(fileID, "archived", user, "{}", "{}"); err != nil { suite.FailNow("failed to update satus of file in database") } @@ -529,61 +655,21 @@ func (suite *DatabaseTests) TestGetCorrID_sameFilePath() { if err := db.SetArchived(fileInfo, fileID); err != nil { suite.FailNow("failed to mark file as archived") } - if err := db.UpdateFileEventLog(fileID, "archived", fileID, user, "{}", "{}"); err != nil { + if err := db.UpdateFileEventLog(fileID, "archived", user, "{}", "{}"); err != nil { suite.FailNow("failed to update satus of file in database") } if err = db.SetAccessionID("stableID", fileID); err != nil { suite.FailNowf("got (%s) when setting stable ID: %s, %s", err.Error(), "stableID", fileID) } - fileID2, err := db.RegisterFile(filePath, user) + fileID2, err := db.RegisterFile(nil, filePath, user) assert.NoError(suite.T(), err, "failed to register file in database") - if err := db.UpdateFileEventLog(fileID2, "uploaded", fileID2, user, "{}", "{}"); err != nil { + if err := db.UpdateFileEventLog(fileID2, "uploaded", user, "{}", "{}"); err != nil { suite.FailNow("failed to update satus of file in database") } assert.NotEqual(suite.T(), fileID, 
fileID2) - corrID, err := db.GetCorrID(user, filePath, "") - assert.NoError(suite.T(), err, "failed to get correlation ID of file in database") - assert.Equal(suite.T(), fileID2, corrID) -} - -func (suite *DatabaseTests) TestGetCorrID_wrongFilePath() { - db, err := NewSDAdb(suite.dbConf) - assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) - - filePath := "/testuser/file10.c4gh" - user := "testuser" - - fileID, err := db.RegisterFile(filePath, user) - assert.NoError(suite.T(), err, "failed to register file in database") - err = db.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") - assert.NoError(suite.T(), err, "failed to update status of file in database") - - corrID, err := db.GetCorrID(user, "/testuser/file20.c4gh", "") - assert.EqualError(suite.T(), err, "sql: no rows in result set") - assert.Equal(suite.T(), "", corrID) -} - -func (suite *DatabaseTests) TestGetCorrID_fileWithAccessionID() { - db, err := NewSDAdb(suite.dbConf) - assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) - - filePath := "/testuser/file10.c4gh" - user := "testuser" - - fileID, err := db.RegisterFile(filePath, user) - assert.NoError(suite.T(), err, "failed to register file in database") - if err := db.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}"); err != nil { - suite.FailNow("failed to update satus of file in database") - } - if err = db.SetAccessionID("stableID", fileID); err != nil { - suite.FailNowf("got (%s) when setting stable ID: %s, %s", err.Error(), "stableID", fileID) - } - - corrID, err := db.GetCorrID(user, filePath, "stableID") - assert.NoError(suite.T(), err, "failed to get correlation ID of file in database") - assert.Equal(suite.T(), fileID, corrID) + db.Close() } func (suite *DatabaseTests) TestListActiveUsers() { @@ -595,21 +681,15 @@ func (suite *DatabaseTests) TestListActiveUsers() { for _, user := range testUsers { for i := 0; i < testCases; i++ { filePath := 
fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", user, i) - fileID, err := db.RegisterFile(filePath, user) + fileID, err := db.RegisterFile(nil, filePath, user) if err != nil { suite.FailNow("Failed to register file") } - err = db.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = db.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") if err != nil { suite.FailNow("Failed to update file event log") } - corrID, err := db.GetCorrID(user, filePath, "") - if err != nil { - suite.FailNow("Failed to get CorrID for file") - } - assert.Equal(suite.T(), fileID, corrID) - checksum := fmt.Sprintf("%x", sha256.New().Sum(nil)) fileInfo := FileInfo{fmt.Sprintf("%x", sha256.New().Sum(nil)), 1234, filePath, checksum, 999, fmt.Sprintf("%x", sha256.New())} err = db.SetArchived(fileInfo, fileID) @@ -628,6 +708,8 @@ func (suite *DatabaseTests) TestListActiveUsers() { suite.FailNowf("got (%s) when setting stable ID: %s, %s", err.Error(), stableID, fileID) } } + + db.Close() } err = db.MapFilesToDataset("test-dataset-01", []string{"accession_User-A_00", "accession_User-A_01", "accession_User-A_02"}) @@ -643,6 +725,8 @@ func (suite *DatabaseTests) TestListActiveUsers() { userList, err := db.ListActiveUsers() assert.NoError(suite.T(), err, "failed to list users from DB") assert.Equal(suite.T(), 3, len(userList)) + + db.Close() } func (suite *DatabaseTests) TestGetDatasetStatus() { @@ -652,21 +736,15 @@ func (suite *DatabaseTests) TestGetDatasetStatus() { for i := 0; i < testCases; i++ { filePath := fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", "User-Q", i) - fileID, err := db.RegisterFile(filePath, "User-Q") + fileID, err := db.RegisterFile(nil, filePath, "User-Q") if err != nil { suite.FailNow("Failed to register file") } - err = db.UpdateFileEventLog(fileID, "uploaded", fileID, "User-Q", "{}", "{}") + err = db.UpdateFileEventLog(fileID, "uploaded", "User-Q", "{}", "{}") if err != nil { suite.FailNow("Failed to update file event log") } - corrID, err := 
db.GetCorrID("User-Q", filePath, "") - if err != nil { - suite.FailNow("Failed to get CorrID for file") - } - assert.Equal(suite.T(), fileID, corrID) - checksum := fmt.Sprintf("%x", sha256.New().Sum(nil)) fileInfo := FileInfo{ fmt.Sprintf("%x", sha256.New().Sum(nil)), @@ -715,6 +793,8 @@ func (suite *DatabaseTests) TestGetDatasetStatus() { status, err = db.GetDatasetStatus(dID) assert.NoError(suite.T(), err, "got (%v) when no error weas expected") assert.Equal(suite.T(), "deprecated", status) + + db.Close() } func (suite *DatabaseTests) TestAddKeyHash() { @@ -732,6 +812,8 @@ func (suite *DatabaseTests) TestAddKeyHash() { err = db.DB.QueryRow("SELECT EXISTS(SELECT 1 FROM sda.encryption_keys WHERE key_hash=$1 AND description=$2)", keyHex, keyDescription).Scan(&exists) assert.NoError(suite.T(), err, "failed to verify key hash existence") assert.True(suite.T(), exists, "key hash was not added to the database") + + db.Close() } func (suite *DatabaseTests) TestListKeyHashes() { @@ -752,6 +834,8 @@ func (suite *DatabaseTests) TestListKeyHashes() { hashList[0].CreatedAt = ct.Format(time.DateOnly) assert.NoError(suite.T(), err, "failed to verify key hash existence") assert.Equal(suite.T(), expectedResponse, hashList[0], "key hash was not added to the database") + + db.Close() } func (suite *DatabaseTests) TestListKeyHashes_emptyTable() { @@ -761,6 +845,8 @@ func (suite *DatabaseTests) TestListKeyHashes_emptyTable() { hashList, err := db.ListKeyHashes() assert.NoError(suite.T(), err, "failed to verify key hash existence") assert.Equal(suite.T(), []C4ghKeyHash{}, hashList, "fuu") + + db.Close() } func (suite *DatabaseTests) TestDeprecateKeyHashes() { @@ -769,6 +855,8 @@ func (suite *DatabaseTests) TestDeprecateKeyHashes() { assert.NoError(suite.T(), db.AddKeyHash("cbd8f5cc8d936ce437a52cd7991453839581fc69ee26e0daefde6a5d2660fc32", "this is a test key"), "failed to register key in database") assert.NoError(suite.T(), 
db.DeprecateKeyHash("cbd8f5cc8d936ce437a52cd7991453839581fc69ee26e0daefde6a5d2660fc32"), "failure when deprecating keyhash") + + db.Close() } func (suite *DatabaseTests) TestDeprecateKeyHashes_wrongHash() { @@ -777,6 +865,8 @@ func (suite *DatabaseTests) TestDeprecateKeyHashes_wrongHash() { assert.NoError(suite.T(), db.AddKeyHash("cbd8f5cc8d936ce437a52cd7991453839581fc69ee26e0daefde6a5d2660fc11", "this is a another key"), "failed to register key in database") assert.EqualError(suite.T(), db.DeprecateKeyHash("wr0n6h4sh"), "key hash not found or already deprecated", "failure when deprecating non existing keyhash") + + db.Close() } func (suite *DatabaseTests) TestDeprecateKeyHashes_alreadyDeprecated() { @@ -788,6 +878,8 @@ func (suite *DatabaseTests) TestDeprecateKeyHashes_alreadyDeprecated() { // we should not be able to change the deprecation date assert.EqualError(suite.T(), db.DeprecateKeyHash("cbd8f5cc8d936ce437a52cd7991453839581fc69ee26e0daefde6a5d2660fc54"), "key hash not found or already deprecated", "failure when deprecating keyhash") + + db.Close() } func (suite *DatabaseTests) TestSetKeyHash() { @@ -799,7 +891,7 @@ func (suite *DatabaseTests) TestSetKeyHash() { keyDescription := "this is a test key" err = db.addKeyHash(keyHex, keyDescription) assert.NoError(suite.T(), err, "failed to register key in database") - fileID, err := db.RegisterFile("/testuser/file1.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/file1.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") // Test that the key hash can be set in the files table @@ -811,6 +903,8 @@ func (suite *DatabaseTests) TestSetKeyHash() { err = db.DB.QueryRow("SELECT EXISTS(SELECT 1 FROM sda.files WHERE key_hash=$1 AND id=$2)", keyHex, fileID).Scan(&exists) assert.NoError(suite.T(), err, "failed to verify key hash set for file") assert.True(suite.T(), exists, "key hash was not set for file in the database") + + db.Close() } func (suite *DatabaseTests) 
TestSetKeyHash_wrongHash() { @@ -821,13 +915,97 @@ func (suite *DatabaseTests) TestSetKeyHash_wrongHash() { keyDescription := "this is a test hash" err = db.addKeyHash(keyHex, keyDescription) assert.NoError(suite.T(), err, "failed to register key in database") - fileID, err := db.RegisterFile("/testuser/file2.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/file2.c4gh", "testuser") assert.NoError(suite.T(), err, "failed to register file in database") // Ensure failure if a non existing hash is used newKeyHex := "6af1407abc74656b8913a7d323c4bfd30bf7c8ca359f74ae35357acef29dc502" err = db.SetKeyHash(newKeyHex, fileID) assert.ErrorContains(suite.T(), err, "violates foreign key constraint") + + db.Close() +} + +func (suite *DatabaseTests) TestGetKeyHash() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + // Register a new key and a new file + keyHex := `6af1407abc74656b8913a7d323c4bfd30bf7c8ca359f74ae35357acef29dc509` + keyDescription := "this is a test key" + err = db.addKeyHash(keyHex, keyDescription) + assert.NoError(suite.T(), err, "failed to register key in database") + fileID, err := db.RegisterFile(nil, "/testuser/file1.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + err = db.SetKeyHash(keyHex, fileID) + assert.NoError(suite.T(), err, "failed to set key hash in database") + + // Test happy path + keyHash, err := db.GetKeyHash(fileID) + assert.NoError(suite.T(), err, "Could not get key hash") + assert.Equal(suite.T(), keyHex, keyHash) + + db.Close() +} + +func (suite *DatabaseTests) TestGetKeyHash_wrongFileID() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + // Register a new key and a new file + keyHex := `6af1407abc74656b8913a7d323c4bfd30bf7c8ca359f74ae35357acef29dc509` + keyDescription := "this is a test key" + err = db.addKeyHash(keyHex, keyDescription) + 
assert.NoError(suite.T(), err, "failed to register key in database") + fileID, err := db.RegisterFile(nil, "/testuser/file1.c4gh", "testuser") + assert.NoError(suite.T(), err, "failed to register file in database") + err = db.SetKeyHash(keyHex, fileID) + assert.NoError(suite.T(), err, "failed to set key hash in database") + + // Test that using an unknown fileID produces an error + _, err = db.GetKeyHash("097e1dc9-6b42-42bf-966d-dece6fefda09") + assert.ErrorContains(suite.T(), err, "no rows in result set") + + db.Close() +} + +func (suite *DatabaseTests) TestCheckKeyHash() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + + assert.NoError(suite.T(), db.AddKeyHash("cbd8f5cc8d936ce437a52cd7991453839581fc69ee26e0daefde6a5d2660fc23", "this is a test key"), "failed to register key in database") + anotherKeyhash := "cbd8f5cc8d936ce437a52cd7991453839581fc69ee26e0daefde6a5d2660fc99" + assert.NoError(suite.T(), db.AddKeyHash(anotherKeyhash, "this is a another key"), "failed to register key in database") + + err = db.CheckKeyHash(anotherKeyhash) + assert.NoError(suite.T(), err, "failed to verify active key hash lookup") + + db.Close() +} + +func (suite *DatabaseTests) TestCheckKeyHash_keyDeprecated() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + + assert.NoError(suite.T(), db.AddKeyHash("cbd8f5cc8d936ce437a52cd7991453839581fc69ee26e0daefde6a5d2660fc23", "this is a test key"), "failed to register key in database") + anotherKeyhash := "cbd8f5cc8d936ce437a52cd7991453839581fc69ee26e0daefde6a5d2660fc99" + assert.NoError(suite.T(), db.AddKeyHash(anotherKeyhash, "this is a another key"), "failed to register key in database") + assert.NoError(suite.T(), db.DeprecateKeyHash(anotherKeyhash), "failure when deprecating keyhash") + + err = db.CheckKeyHash(anotherKeyhash) + assert.ErrorContains(suite.T(), err, "the c4gh key hash has been 
deprecated") + + db.Close() +} + +func (suite *DatabaseTests) TestCheckKeyHash_keyNonExistent() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + + assert.NoError(suite.T(), db.AddKeyHash("cbd8f5cc8d936ce437a52cd7991453839581fc69ee26e0daefde6a5d2660fc23", "this is a test key"), "failed to register key in database") + + err = db.CheckKeyHash("somekeyhash") + assert.ErrorContains(suite.T(), err, "the c4gh key hash is not registered") + + db.Close() } func (suite *DatabaseTests) TestListDatasets() { @@ -837,21 +1015,15 @@ func (suite *DatabaseTests) TestListDatasets() { for i := 0; i < testCases; i++ { filePath := fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", "User-Q", i) - fileID, err := db.RegisterFile(filePath, "User-Q") + fileID, err := db.RegisterFile(nil, filePath, "User-Q") if err != nil { suite.FailNow("Failed to register file") } - err = db.UpdateFileEventLog(fileID, "uploaded", fileID, "User-Q", "{}", "{}") + err = db.UpdateFileEventLog(fileID, "uploaded", "User-Q", "{}", "{}") if err != nil { suite.FailNow("Failed to update file event log") } - corrID, err := db.GetCorrID("User-Q", filePath, "") - if err != nil { - suite.FailNow("Failed to get CorrID for file") - } - assert.Equal(suite.T(), fileID, corrID) - checksum := fmt.Sprintf("%x", sha256.New().Sum(nil)) fileInfo := FileInfo{ fmt.Sprintf("%x", sha256.New().Sum(nil)), @@ -912,6 +1084,8 @@ func (suite *DatabaseTests) TestListDatasets() { assert.NoError(suite.T(), err, "got (%v) when listing datasets", err) assert.Equal(suite.T(), "test-get-dataset-01", datasets[0].DatasetID) assert.Equal(suite.T(), "registered", datasets[1].Status) + + db.Close() } func (suite *DatabaseTests) TestListUserDatasets() { @@ -921,21 +1095,15 @@ func (suite *DatabaseTests) TestListUserDatasets() { user := "User-Q" for i := 0; i < 6; i++ { filePath := fmt.Sprintf("/%v/TestGetUserFiles-00%d.c4gh", user, i) - fileID, err := db.RegisterFile(filePath, user) + 
fileID, err := db.RegisterFile(nil, filePath, user) if err != nil { suite.FailNow("Failed to register file") } - err = db.UpdateFileEventLog(fileID, "uploaded", fileID, user, "{}", "{}") + err = db.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") if err != nil { suite.FailNow("Failed to update file event log") } - corrID, err := db.GetCorrID(user, filePath, "") - if err != nil { - suite.FailNow("Failed to get CorrID for file") - } - assert.Equal(suite.T(), fileID, corrID) - checksum := fmt.Sprintf("%x", sha256.New().Sum(nil)) fileInfo := FileInfo{ fmt.Sprintf("%x", sha256.New().Sum(nil)), @@ -978,7 +1146,7 @@ func (suite *DatabaseTests) TestListUserDatasets() { suite.FailNow("failed to update dataset event") } - fileID, err := db.RegisterFile("filePath", "user") + fileID, err := db.RegisterFile(nil, "filePath", "user") if err != nil { suite.FailNow("Failed to register file") } @@ -1004,6 +1172,8 @@ func (suite *DatabaseTests) TestListUserDatasets() { assert.NoError(suite.T(), err, "got (%v) when listing datasets for a user", err) assert.Equal(suite.T(), 2, len(datasets)) assert.Equal(suite.T(), "test-user-dataset-01", datasets[0].DatasetID) + + db.Close() } func (suite *DatabaseTests) TestUpdateUserInfo() { @@ -1024,6 +1194,8 @@ func (suite *DatabaseTests) TestUpdateUserInfo() { err = db.DB.QueryRow("SELECT name FROM sda.userinfo WHERE id=$1", userID).Scan(&name2) assert.NoError(suite.T(), err, "could not select user info: %v", err) assert.Equal(suite.T(), name, name2, "user info table did not update correctly") + + db.Close() } func (suite *DatabaseTests) TestUpdateUserInfo_newInfo() { @@ -1054,13 +1226,15 @@ func (suite *DatabaseTests) TestUpdateUserInfo_newInfo() { err = db.DB.QueryRow("SELECT groups FROM sda.userinfo WHERE id=$1", userID).Scan(pq.Array(&dbgroups)) assert.NoError(suite.T(), err) assert.Equal(suite.T(), groups, dbgroups) + + db.Close() } func (suite *DatabaseTests) TestGetReVerificationData() { db, err := NewSDAdb(suite.dbConf) 
assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) - fileID, err := db.RegisterFile("/testuser/TestGetReVerificationData.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestGetReVerificationData.c4gh", "testuser") if err != nil { suite.FailNow("failed to register file in database") } @@ -1092,13 +1266,51 @@ func (suite *DatabaseTests) TestGetReVerificationData() { data, err := db.GetReVerificationData(accession) assert.NoError(suite.T(), err, "failed to get verification data") assert.Equal(suite.T(), "/archive/TestGetReVerificationData.c4gh", data.ArchivePath) + + db.Close() +} + +func (suite *DatabaseTests) TestGetReVerificationDataFromFileID() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) + + fileID, err := db.RegisterFile(nil, "/testuser/TestGetReVerificationData.c4gh", "testuser") + if err != nil { + suite.FailNow("failed to register file in database") + } + + encSha := sha256.New() + _, err = encSha.Write([]byte("Checksum")) + if err != nil { + suite.FailNow("failed to generate checksum") + } + + decSha := sha256.New() + _, err = decSha.Write([]byte("DecryptedChecksum")) + if err != nil { + suite.FailNow("failed to generate checksum") + } + + fileInfo := FileInfo{fmt.Sprintf("%x", encSha.Sum(nil)), 2000, "/archive/TestGetReVerificationData.c4gh", fmt.Sprintf("%x", decSha.Sum(nil)), 1987, fmt.Sprintf("%x", sha256.New())} + if err = db.SetArchived(fileInfo, fileID); err != nil { + suite.FailNow("failed to archive file") + } + if err = db.SetVerified(fileInfo, fileID); err != nil { + suite.FailNow("failed to mark file as verified") + } + + data, err := db.GetReVerificationDataFromFileID(fileID) + assert.NoError(suite.T(), err, "failed to get verification data from fileID") + assert.Equal(suite.T(), "/archive/TestGetReVerificationData.c4gh", data.ArchivePath) + + db.Close() } func (suite *DatabaseTests) TestGetReVerificationData_wrongAccessionID() { 
db, err := NewSDAdb(suite.dbConf) assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) - fileID, err := db.RegisterFile("/testuser/TestGetReVerificationData.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestGetReVerificationData.c4gh", "testuser") if err != nil { suite.FailNow("failed to register file in database") } @@ -1131,13 +1343,15 @@ func (suite *DatabaseTests) TestGetReVerificationData_wrongAccessionID() { data, err := db.GetReVerificationData("accession") assert.EqualError(suite.T(), err, "sql: no rows in result set") assert.Equal(suite.T(), schema.IngestionVerification{}, data) + + db.Close() } func (suite *DatabaseTests) TestGetDecryptedChecksum() { db, err := NewSDAdb(suite.dbConf) assert.NoError(suite.T(), err, "got (%v) when creating new connection", err) - fileID, err := db.RegisterFile("/testuser/TestGetDecryptedChecksum.c4gh", "testuser") + fileID, err := db.RegisterFile(nil, "/testuser/TestGetDecryptedChecksum.c4gh", "testuser") if err != nil { suite.FailNow("failed to register file in database") } @@ -1166,6 +1380,8 @@ func (suite *DatabaseTests) TestGetDecryptedChecksum() { checksum, err := db.GetDecryptedChecksum(fileID) assert.NoError(suite.T(), err, "failed to get verification data") assert.Equal(suite.T(), fmt.Sprintf("%x", decSha.Sum(nil)), checksum) + + db.Close() } func (suite *DatabaseTests) TestGetDsatasetFiles() { @@ -1175,21 +1391,15 @@ func (suite *DatabaseTests) TestGetDsatasetFiles() { for i := 0; i < testCases; i++ { filePath := fmt.Sprintf("/%v/TestGetDsatasetFiles-00%d.c4gh", "User-Q", i) - fileID, err := db.RegisterFile(filePath, "User-Q") + fileID, err := db.RegisterFile(nil, filePath, "User-Q") if err != nil { suite.FailNow("Failed to register file") } - err = db.UpdateFileEventLog(fileID, "uploaded", fileID, "User-Q", "{}", "{}") + err = db.UpdateFileEventLog(fileID, "uploaded", "User-Q", "{}", "{}") if err != nil { suite.FailNow("Failed to update file event log") } - corrID, err 
:= db.GetCorrID("User-Q", filePath, "") - if err != nil { - suite.FailNow("Failed to get CorrID for file") - } - assert.Equal(suite.T(), fileID, corrID) - checksum := fmt.Sprintf("%x", sha256.New().Sum(nil)) fileInfo := FileInfo{ fmt.Sprintf("%x", sha256.New().Sum(nil)), @@ -1224,6 +1434,8 @@ func (suite *DatabaseTests) TestGetDsatasetFiles() { accessions, err := db.GetDatasetFiles(dID) assert.NoError(suite.T(), err, "failed to get accessions for a dataset") assert.Equal(suite.T(), []string{"accession_User-Q_00", "accession_User-Q_01", "accession_User-Q_02"}, accessions) + + db.Close() } func (suite *DatabaseTests) TestGetInboxFilePathFromID() { @@ -1232,11 +1444,11 @@ func (suite *DatabaseTests) TestGetInboxFilePathFromID() { user := "UserX" filePath := fmt.Sprintf("/%v/Deletefile1.c4gh", user) - fileID, err := db.RegisterFile(filePath, user) + fileID, err := db.RegisterFile(nil, filePath, user) if err != nil { suite.FailNow("Failed to register file") } - err = db.UpdateFileEventLog(fileID, "uploaded", fileID, "User-z", "{}", "{}") + err = db.UpdateFileEventLog(fileID, "uploaded", "User-z", "{}", "{}") if err != nil { suite.FailNow("Failed to update file event log") } @@ -1244,10 +1456,11 @@ func (suite *DatabaseTests) TestGetInboxFilePathFromID() { assert.NoError(suite.T(), err) assert.Equal(suite.T(), path, filePath) - err = db.UpdateFileEventLog(fileID, "archived", fileID, user, "{}", "{}") + err = db.UpdateFileEventLog(fileID, "archived", user, "{}", "{}") assert.NoError(suite.T(), err) _, err = db.getInboxFilePathFromID(user, fileID) assert.Error(suite.T(), err) + db.Close() } func (suite *DatabaseTests) TestGetFileIDByUserPathAndStatus() { @@ -1256,7 +1469,7 @@ func (suite *DatabaseTests) TestGetFileIDByUserPathAndStatus() { user := "UserX" filePath := fmt.Sprintf("/%v/Deletefile1.c4gh", user) - fileID, err := db.RegisterFile(filePath, user) + fileID, err := db.RegisterFile(nil, filePath, user) if err != nil { suite.FailNow("Failed to register file") } @@ 
-1270,7 +1483,7 @@ func (suite *DatabaseTests) TestGetFileIDByUserPathAndStatus() { assert.Equal(suite.T(), fileID, fileID2) // update the status of the file - err = db.UpdateFileEventLog(fileID, "archived", fileID, user, "{}", "{}") + err = db.UpdateFileEventLog(fileID, "archived", user, "{}", "{}") if err != nil { suite.FailNow("Failed to update file event log") } @@ -1284,4 +1497,45 @@ func (suite *DatabaseTests) TestGetFileIDByUserPathAndStatus() { fileID2, err = db.getFileIDByUserPathAndStatus(user, filePath, "archived") assert.NoError(suite.T(), err) assert.Equal(suite.T(), fileID, fileID2) + + db.Close() +} + +func (suite *DatabaseTests) TestGetFileDetailsFromUUI_Found() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "failed to create new connection") + + // Register a file to get a valid UUID + filePath := "/dummy_user.org/Dummy_folder/dummyfile.c4gh" + user := "dummy@user.org" + fileID, err := db.RegisterFile(nil, filePath, user) + if err != nil { + suite.FailNow("failed to register file in database") + } + + // Update event log to ensure correlation ID is set + err = db.UpdateFileEventLog(fileID, "uploaded", user, "{}", "{}") + if err != nil { + suite.FailNow("failed to update file event log") + } + + infoFile, err := db.GetFileDetailsFromUUID(fileID, "uploaded") + assert.NoError(suite.T(), err, "failed to get user and path from UUID") + assert.Equal(suite.T(), user, infoFile.User) + assert.Equal(suite.T(), filePath, infoFile.Path) + db.Close() +} + +func (suite *DatabaseTests) TestGetFileDetailsFromUUID_NotFound() { + db, err := NewSDAdb(suite.dbConf) + assert.NoError(suite.T(), err, "failed to create new connection") + + // Use a non-existent UUID + invalidUUID := "abc-123" + infoFile, err := db.GetFileDetailsFromUUID(invalidUUID, "uploaded") + assert.Error(suite.T(), err, "expected error for non-existent UUID") + assert.Empty(suite.T(), infoFile.User) + assert.Empty(suite.T(), infoFile.Path) + + db.Close() } diff --git 
a/sda/internal/helper/helper_test.go b/sda/internal/helper/helper_test.go index 01c9227ce..06b1dedb5 100644 --- a/sda/internal/helper/helper_test.go +++ b/sda/internal/helper/helper_test.go @@ -68,7 +68,7 @@ func (ts *HelperTest) TestCreateRSAToken() { assert.NoError(ts.T(), err) assert.NoError(ts.T(), set.AddKey(key)) - fmt.Println(tok) + _, _ = fmt.Println(tok) _, err = jwt.Parse([]byte(tok), jwt.WithKeySet(set, jws.WithInferAlgorithmFromKey(true)), jwt.WithValidate(true)) assert.NoError(ts.T(), err) @@ -120,7 +120,7 @@ func (ts *HelperTest) TestCreateHSToken() { assert.NoError(ts.T(), set.AddKey(jwtKey)) - fmt.Println(tok) + _, _ = fmt.Println(tok) _, err = jwt.Parse([]byte(tok), jwt.WithKeySet(set, jws.WithInferAlgorithmFromKey(true)), jwt.WithValidate(true)) assert.NoError(ts.T(), err) diff --git a/sda/internal/jsonadapter/jsonadapter_test.go b/sda/internal/jsonadapter/jsonadapter_test.go index 406a02c52..d501c932f 100644 --- a/sda/internal/jsonadapter/jsonadapter_test.go +++ b/sda/internal/jsonadapter/jsonadapter_test.go @@ -52,7 +52,7 @@ func (ts *AdapterTestSuite) SetupSuite() { } func (ts *AdapterTestSuite) TearDownSuite() { - os.RemoveAll(ts.File.Name()) + _ = os.RemoveAll(ts.File.Name()) } func (ts *AdapterTestSuite) TestAdapter_empty() { diff --git a/sda/internal/reencrypt/reencrypt_utils.go b/sda/internal/reencrypt/reencrypt_utils.go new file mode 100644 index 000000000..62ca7b794 --- /dev/null +++ b/sda/internal/reencrypt/reencrypt_utils.go @@ -0,0 +1,48 @@ +package reencrypt + +import ( + "context" + "fmt" + "time" + + "github.com/neicnordic/sensitive-data-archive/internal/config" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +// CallReencryptHeader re-encrypts the header of a file using the public key +// provided and returns the new header. The function uses gRPC to +// communicate with the re-encrypt service and handles TLS configuration +// if needed. 
The function also handles the case where the CA certificate +// is provided for secure communication. +func CallReencryptHeader(oldHeader []byte, c4ghPubKey string, grpcConf config.Grpc) ([]byte, error) { + var opts []grpc.DialOption + switch { + case grpcConf.ClientCreds != nil: + opts = append(opts, grpc.WithTransportCredentials(grpcConf.ClientCreds)) + default: + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + + conn, err := grpc.NewClient(fmt.Sprintf("%s:%d", grpcConf.Host, grpcConf.Port), opts...) + if err != nil { + log.Errorf("failed to open a new gRPC channel, reason: %v", err) + + return nil, err + } + defer conn.Close() + + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(grpcConf.Timeout)*time.Second) + defer cancel() + + c := NewReencryptClient(conn) + res, err := c.ReencryptHeader(ctx, &ReencryptRequest{Oldheader: oldHeader, Publickey: c4ghPubKey}) + if err != nil { + log.Errorf("failed to connect to the reencrypt service, reason %v", err) + + return nil, err + } + + return res.Header, nil +} diff --git a/sda/internal/reencrypt/reencrypt_utils_test.go b/sda/internal/reencrypt/reencrypt_utils_test.go new file mode 100644 index 000000000..8d4edbd86 --- /dev/null +++ b/sda/internal/reencrypt/reencrypt_utils_test.go @@ -0,0 +1,62 @@ +package reencrypt + +import ( + "context" + "fmt" + "net" + "testing" + + "github.com/neicnordic/sensitive-data-archive/internal/config" + "google.golang.org/grpc" +) + +type mockServer struct { + UnimplementedReencryptServer + headerResponse []byte +} + +func (s *mockServer) ReencryptHeader(ctx context.Context, req *ReencryptRequest) (*ReencryptResponse, error) { + return &ReencryptResponse{Header: s.headerResponse}, nil +} + +func TestCallReencryptHeader(t *testing.T) { + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("failed to listen: %v", err) + } + defer lis.Close() + + srv := grpc.NewServer() + mockHeader := []byte("mocked-header") + 
RegisterReencryptServer(srv, &mockServer{headerResponse: mockHeader}) + + go func() { + _ = srv.Serve(lis) + }() + defer srv.GracefulStop() + + host, portStr, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + t.Fatalf("failed to split host/port: %v", err) + } + var port int + _, err = fmt.Sscanf(portStr, "%d", &port) + if err != nil { + t.Fatalf("failed to parse port: %v", err) + } + + grpcConf := config.Grpc{ + Host: host, + Port: port, + Timeout: 2, + } + oldHeader := []byte("old-header") + pubKey := "test-pubkey" + res, err := CallReencryptHeader(oldHeader, pubKey, grpcConf) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if string(res) != string(mockHeader) { + t.Errorf("expected header %q, got %q", mockHeader, res) + } +} diff --git a/sda/internal/schema/schema.go b/sda/internal/schema/schema.go index f249ed52f..c878a6eff 100644 --- a/sda/internal/schema/schema.go +++ b/sda/internal/schema/schema.go @@ -67,6 +67,8 @@ func getStructName(path string) any { return new(SyncDataset) case "metadata-sync": return new(SyncMetadata) + case "rotate-key": + return new(KeyRotation) default: return "" } @@ -185,3 +187,8 @@ type C4ghPubKey struct { PubKey string `json:"pubkey"` Description string `json:"description"` } + +type KeyRotation struct { + Type string `json:"type"` + FileID string `json:"file_id"` +} diff --git a/sda/internal/schema/schema_test.go b/sda/internal/schema/schema_test.go index bb36014f7..c08c95b01 100644 --- a/sda/internal/schema/schema_test.go +++ b/sda/internal/schema/schema_test.go @@ -469,3 +469,21 @@ func TestValidateJSONBigpictureMetadtaSync(t *testing.T) { msg, _ = json.Marshal(badMsg) assert.Error(t, ValidateJSON(fmt.Sprintf("%s/bigpicture/metadata-sync.json", schemaPath), msg)) } + +func TestValidateJSONKeyRotation(t *testing.T) { + okMsg := KeyRotation{ + Type: "key_rotation", + FileID: "cd532362-e06e-4460-8490-b9ce64b8d9e7", + } + + msg, _ := json.Marshal(okMsg) + assert.Nil(t, 
ValidateJSON(fmt.Sprintf("%s/isolated/rotate-key.json", schemaPath), msg)) + + badMsg := KeyRotation{ + Type: "foo", + FileID: "cd532362-e06e-4460-8490-b9ce64b8d9e7", + } + + msg, _ = json.Marshal(badMsg) + assert.Error(t, ValidateJSON(fmt.Sprintf("%s/isolated/rotate-key.json", schemaPath), msg)) +} diff --git a/sda/internal/storage/storage_test.go b/sda/internal/storage/storage_test.go index a97819b19..67b626dc2 100644 --- a/sda/internal/storage/storage_test.go +++ b/sda/internal/storage/storage_test.go @@ -111,7 +111,7 @@ func TestMain(m *testing.M) { if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() return nil }); err != nil { @@ -131,7 +131,7 @@ func TestMain(m *testing.M) { log.Panicf("Could not purge resource: %s", err) } - os.RemoveAll(sshPath) + _ = os.RemoveAll(sshPath) os.Exit(code) } @@ -234,7 +234,7 @@ func (ts *StorageTestSuite) TestPosixBackend() { written, err := writer.Write(writeData) assert.NoError(ts.T(), err, "Failure when writing to posix writer") assert.Equal(ts.T(), len(writeData), written, "Did not write all writeData") - writer.Close() + _ = writer.Close() reader, err := backend.NewFileReader("testFile") assert.Nil(ts.T(), err, "posix NewFileReader failed when it should work") @@ -315,7 +315,7 @@ func (ts *StorageTestSuite) TestS3Backend() { written, err := writer.Write(writeData) assert.Nil(ts.T(), err, "Failure when writing to s3 writer") assert.Equal(ts.T(), len(writeData), written, "Did not write all writeData") - writer.Close() + _ = writer.Close() // sleep to allow the write to complete, otherwise the next step will fail due to timing issues. 
time.Sleep(1 * time.Second) @@ -341,7 +341,7 @@ func (ts *StorageTestSuite) TestS3Backend() { written, err = writer.Write(writeData) assert.Equal(ts.T(), len(writeData), written, "Did not write all writeData") assert.Nil(ts.T(), err, "Failure when writing to s3 writer") - writer.Close() + _ = writer.Close() size, err = s3back.GetFileSize("s3Creatable", true) assert.Nil(ts.T(), err, "s3 GetFileSize with expected delay failed when it should work") assert.NotNil(ts.T(), size, "Got a nil size for s3") @@ -390,7 +390,7 @@ func (ts *StorageTestSuite) TestSftpBackend() { written, err := writer.Write(writeData) assert.Nil(ts.T(), err, "Failure when writing to sftp writer") assert.Equal(ts.T(), len(writeData), written, "Did not write all writeData") - writer.Close() + _ = writer.Close() reader, err := sftpBack.NewFileReader(sftpCreatable) assert.Nil(ts.T(), err, "sftp NewFileReader failed when it should work") diff --git a/sda/internal/userauth/userauth_test.go b/sda/internal/userauth/userauth_test.go index 1a984172c..4de8aa0c0 100644 --- a/sda/internal/userauth/userauth_test.go +++ b/sda/internal/userauth/userauth_test.go @@ -94,7 +94,7 @@ func TestMain(m *testing.M) { if err != nil { return err } - res.Body.Close() + _ = res.Body.Close() return nil }); err != nil { diff --git a/sda/schemas/federated/rotate-key.json b/sda/schemas/federated/rotate-key.json new file mode 100644 index 000000000..8fd5f0904 --- /dev/null +++ b/sda/schemas/federated/rotate-key.json @@ -0,0 +1,30 @@ +{ + "title": "JSON schema for SDA key rotation message interface", + "$id": "https://github.com/neicnordic/sensitive-data-archive/tree/master/sda/schemas/federated/rotate-key.json", + "$schema": "http://json-schema.org/draft-07/schema", + "type": "object", + "required": [ + "type", + "file_id" + ], + "additionalProperties": true, + "properties": { + "type": { + "$id": "#/properties/type", + "type": "string", + "title": "The message type", + "description": "The message type", + "const": "key_rotation" 
+ }, + "file_id": { + "$id": "#/properties/file_id", + "type": "string", + "title": "The unique file identifier", + "description": "The unique file identifier", + "pattern": "^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$", + "examples": [ + "420420cc-e060-4583-a891-9f8170ee66c8" + ] + } + } +} diff --git a/sda/schemas/isolated/rotate-key.json b/sda/schemas/isolated/rotate-key.json new file mode 120000 index 000000000..09e7a2493 --- /dev/null +++ b/sda/schemas/isolated/rotate-key.json @@ -0,0 +1 @@ +../federated/rotate-key.json \ No newline at end of file