diff --git a/ai4papi/routers/v1/deployments/modules.py b/ai4papi/routers/v1/deployments/modules.py
index 6c59325..be4d81e 100644
--- a/ai4papi/routers/v1/deployments/modules.py
+++ b/ai4papi/routers/v1/deployments/modules.py
@@ -204,23 +204,7 @@ def create_deployment(
     else:
         priority = 50
 
-    # Remove non-compliant characters from hostname
     base_domain = papiconf.MAIN_CONF['lb']['domain'][vo]
-    hostname = utils.safe_hostname(
-        hostname=user_conf['general']['hostname'],
-        job_uuid=job_uuid,
-    )
-
-    #TODO: reenable custom hostname, when we are able to parse all node metadata
-    # (domain key) to build the true domain
-    hostname = job_uuid
-
-    # # Check the hostname is available in all data-centers
-    # # (we don't know beforehand where the job will land)
-    # #TODO: make sure this does not break if the datacenter is unavailable
-    # #TODO: disallow custom hostname, pain in the ass, slower deploys
-    # for datacenter in papiconf.MAIN_CONF['nomad']['datacenters']:
-    #     utils.check_domain(f"{hostname}.{datacenter}-{base_domain}")
 
     # Replace the Nomad job template
     nomad_conf = nomad_conf.safe_substitute(
@@ -234,7 +218,7 @@ def create_deployment(
         'TITLE': user_conf['general']['title'][:45],  # keep only 45 first characters
         'DESCRIPTION': user_conf['general']['desc'][:1000],  # limit to 1K characters
         'BASE_DOMAIN': base_domain,
-        'HOSTNAME': hostname,
+        'HOSTNAME': job_uuid,
         'DOCKER_IMAGE': user_conf['general']['docker_image'],
         'DOCKER_TAG': user_conf['general']['docker_tag'],
         'SERVICE': user_conf['general']['service'],
diff --git a/ai4papi/routers/v1/deployments/tools.py b/ai4papi/routers/v1/deployments/tools.py
index 9d7e6d4..0fb87ba 100644
--- a/ai4papi/routers/v1/deployments/tools.py
+++ b/ai4papi/routers/v1/deployments/tools.py
@@ -200,24 +200,8 @@ def create_deployment(
     else:
         priority = 50
 
-    # Remove non-compliant characters from hostname
     base_domain = papiconf.MAIN_CONF['lb']['domain'][vo]
-    hostname = utils.safe_hostname(
-        hostname=user_conf['general']['hostname'],
-        job_uuid=job_uuid,
-    )
-
-    #TODO: reenable custom hostname, when we are able to parse all node metadata
-    # (domain key) to build the true domain
-    hostname = job_uuid
-
-    # # Check the hostname is available in all data-centers
-    # # (we don't know beforehand where the job will land)
-    # #TODO: make sure this does not break if the datacenter is unavailable
-    # #TODO: disallow custom hostname, pain in the ass, slower deploys
-    # for datacenter in papiconf.MAIN_CONF['nomad']['datacenters']:
-    #     utils.check_domain(f"{hostname}.{datacenter}-{base_domain}")
-
+
     # Create a default secret for the Federated Server
     _ = ai4secrets.create_secret(
         vo=vo,
@@ -247,7 +231,7 @@ def create_deployment(
         'TITLE': user_conf['general']['title'][:45],  # keep only 45 first characters
         'DESCRIPTION': user_conf['general']['desc'][:1000],  # limit to 1K characters
         'BASE_DOMAIN': base_domain,
-        'HOSTNAME': hostname,
+        'HOSTNAME': job_uuid,
         'DOCKER_IMAGE': user_conf['general']['docker_image'],
         'DOCKER_TAG': user_conf['general']['docker_tag'],
         'CPU_NUM': user_conf['hardware']['cpu_num'],
@@ -263,8 +247,12 @@ def create_deployment(
         'FEDERATED_MIN_AVAILABLE_CLIENTS': user_conf['configuration']['min_available_clients'],
         'FEDERATED_STRATEGY': user_conf['configuration']['strategy'],
         'MU_FEDPROX': user_conf['configuration']['mu'],
-        'FEDAVGM_SERVER_FL' : user_conf['configuration']['momentum'],
-        'FEDAVGM_SERVER_MOMENTUM': user_conf['configuration']['fl']
+        'FEDAVGM_SERVER_FL': user_conf['configuration']['fl'],
+        'FEDAVGM_SERVER_MOMENTUM': user_conf['configuration']['momentum'],
+        'DP': user_conf['configuration']['dp'],
+        'NOISE_MULT': user_conf['configuration']['noise_mult'],
+        'SAMPLED_CLIENTS': user_conf['configuration']['sampled_clients'],
+        'CLIP_NORM': user_conf['configuration']['clip_norm']
     }
 )
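Note on the substitution pattern used above: the Nomad job file is rendered with Python's `string.Template`, whose `safe_substitute` fills the `${...}` placeholders and, unlike `substitute`, leaves any unknown placeholder untouched instead of raising. A minimal sketch with a toy template (the real template is `etc/tools/ai4os-federated-server/nomad.hcl`; the values here are illustrative only):

```python
# Minimal sketch of the rendering pattern used in create_deployment(); the
# template string and values are illustrative, not the real job file.
from string import Template

nomad_conf = Template(
    'FEDAVGM_SERVER_MOMENTUM = "${FEDAVGM_SERVER_MOMENTUM}"\n'
    'NOISE_MULT = "${NOISE_MULT}"\n'
    'CLIP_NORM = "${CLIP_NORM}"\n'
)
rendered = nomad_conf.safe_substitute({
    'FEDAVGM_SERVER_MOMENTUM': 0.9,  # server-side momentum (FedAvgM)
    'NOISE_MULT': 1,                 # Gaussian noise multiplier (DP)
    # 'CLIP_NORM' omitted on purpose: safe_substitute leaves "${CLIP_NORM}" as-is
})
print(rendered)
```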
diff --git a/ai4papi/routers/v1/secrets.py b/ai4papi/routers/v1/secrets.py
index 87822b1..1c5f462 100644
--- a/ai4papi/routers/v1/secrets.py
+++ b/ai4papi/routers/v1/secrets.py
@@ -138,7 +138,7 @@ def get_secrets(
         subpath += '/'
 
     # Retrieve initial level-0 secrets
-    user_path = f"users/{auth_info['id']}"
+    user_path = f"users/{auth_info['id']}/{vo}"
     try:
         r = client.secrets.kv.v1.list_secrets(
             path = user_path + subpath,
@@ -201,7 +201,7 @@ def create_secret(
 
     # Create secret
     client.secrets.kv.v1.create_or_update_secret(
-        path=f"users/{auth_info['id']}/{secret_path}",
+        path=f"users/{auth_info['id']}/{vo}/{secret_path}",
         mount_point='/secrets/',
         secret=secret_data,
     )
@@ -237,7 +237,7 @@ def delete_secret(
 
     # Delete secret
     client.secrets.kv.v1.delete_secret(
-        path=f"users/{auth_info['id']}/{secret_path}",
+        path=f"users/{auth_info['id']}/{vo}/{secret_path}",
         mount_point=VAULT_MOUNT_POINT,
    )
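With this change, user secrets are namespaced per Virtual Organization in Vault: `users/{user_id}/{vo}/{secret_path}` instead of `users/{user_id}/{secret_path}`. A hedged sketch of what the new layout looks like from the `hvac` client side; the Vault URL, token, and the secret path below are placeholders, while the `'/secrets/'` mount point mirrors the one used above:

```python
# Sketch only: demonstrates the new VO-scoped KV v1 paths.
import hvac

client = hvac.Client(url='https://vault.example.org:8200', token='<vault-token>')
user_id, vo = '1234-abcd', 'vo.ai4eosc.eu'

# New layout: users/{user_id}/{vo}/... (was users/{user_id}/...)
client.secrets.kv.v1.create_or_update_secret(
    path=f"users/{user_id}/{vo}/deployments/demo/federated/default",
    mount_point='/secrets/',
    secret={'token': 'supersecret'},
)
# Listing now starts one level deeper, under the VO
r = client.secrets.kv.v1.list_secrets(
    path=f"users/{user_id}/{vo}",
    mount_point='/secrets/',
)
print(r['data']['keys'])
```

One implication worth noting: secrets created under the old, VO-less layout will not be visible through these endpoints unless they are migrated to the new paths.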
diff --git a/ai4papi/routers/v1/try_me/nomad.py b/ai4papi/routers/v1/try_me/nomad.py
index f1b5698..2d267fa 100644
--- a/ai4papi/routers/v1/try_me/nomad.py
+++ b/ai4papi/routers/v1/try_me/nomad.py
@@ -1,7 +1,13 @@
+"""
+These routes are meant to be usable by anyone who is authenticated (no VO membership
+required), so that everyone can try the platform. Jobs are deployed by default in
+the AI4EOSC namespace.
+"""
+
 from copy import deepcopy
+import types
 import uuid
 
-from fastapi import APIRouter, Depends, HTTPException
+from fastapi import APIRouter, Depends, HTTPException, Query
 from fastapi.security import HTTPBearer
 
 from ai4papi import auth
@@ -18,19 +24,102 @@
 )
 security = HTTPBearer()
 
+# (!) try-me jobs are always deployed in AI4EOSC
+VO = "vo.ai4eosc.eu"
+NAMESPACE = papiconf.MAIN_CONF['nomad']['namespaces'][VO]
+
+
+@router.get("")
+def get_deployments(
+    full_info: bool = Query(default=False),
+    authorization=Depends(security),
+    ):
+    """
+    Returns a list of all the try-me deployments belonging to a user.
+
+    Parameters:
+    * **full_info**: retrieve the full information of each deployment.
+      Disabled by default, as it will increase latency too much if there are many
+      deployments.
+    """
+    # Retrieve authenticated user info
+    auth_info = auth.get_user_info(token=authorization.credentials)
+
+    # Retrieve all jobs in namespace
+    jobs = nomad.get_deployments(
+        namespace=NAMESPACE,
+        owner=auth_info['id'],
+        prefix='try',
+    )
+    user_jobs = []
+    for j in jobs:
+        try:
+            job_info = get_deployment(
+                deployment_uuid=j['ID'],
+                full_info=full_info,
+                authorization=types.SimpleNamespace(
+                    credentials=authorization.credentials  # token
+                ),
+            )
+        except HTTPException:  # not a try-me job
+            continue
+        except Exception as e:  # unexpected error
+            raise e
+
+        user_jobs.append(job_info)
+
+    # Sort deployments by creation date (most recent first)
+    sorted_jobs = sorted(user_jobs, key=lambda j: j['submit_time'], reverse=True)
+
+    return sorted_jobs
+
+
+@router.get("/{deployment_uuid}")
+def get_deployment(
+    deployment_uuid: str,
+    full_info: bool = Query(default=True),
+    authorization=Depends(security),
+    ):
+    """
+    This function is mainly used to retrieve the endpoint of a try-me job.
+    We cannot return the endpoint when creating the job, because the final endpoint
+    depends on which datacenter the job ends up landing in.
+
+    Parameters:
+    * **deployment_uuid**: uuid of deployment to gather info about
+
+    Returns a dict with info
+    """
+    # Retrieve authenticated user info
+    auth_info = auth.get_user_info(token=authorization.credentials)
+
+    job = nomad.get_deployment(
+        deployment_uuid=deployment_uuid,
+        namespace=NAMESPACE,
+        owner=auth_info['id'],
+        full_info=full_info,
+    )
+
+    # Rewrite main endpoint, otherwise it automatically selects DEEPaaS API
+    job['main_endpoint'] = 'ui'
+
+    return job
+
 
 @router.post("")
 def create_deployment(
     module_name: str,
+    title: str = Query(default=""),
     authorization=Depends(security),
     ):
     """
     Submit a try-me deployment to Nomad.
     The deployment will automatically kill himself after a short amount of time.
 
-    This endpoint is meant to be public for everyone to try (no authorization required).
-    We deploy jobs by default in the AI4EOSC namespace.
-
     Returns a string with the endpoint to access the API.
     """
     # Retrieve authenticated user info
@@ -51,11 +140,12 @@ def create_deployment(
     nomad_conf = nomad_conf.safe_substitute(
         {
             'JOB_UUID': job_uuid,
-            'NAMESPACE': 'ai4eosc',  # (!) try-me jobs are always deployed in "ai4eosc"
+            'NAMESPACE': NAMESPACE,
+            'TITLE': title[:45],
             'OWNER': auth_info['id'],
             'OWNER_NAME': auth_info['name'],
             'OWNER_EMAIL': auth_info['email'],
-            'BASE_DOMAIN': papiconf.MAIN_CONF['lb']['domain']['vo.ai4eosc.eu'],  # idem
+            'BASE_DOMAIN': papiconf.MAIN_CONF['lb']['domain'][VO],
             'HOSTNAME': job_uuid,
             'DOCKER_IMAGE': docker_image,
         }
@@ -67,7 +157,7 @@ def create_deployment(
     # Check that the target node (ie. tag='tryme') resources are available because
     # these jobs cannot be left queueing
     # We check for every resource metric (cpu, disk, ram)
-    stats = get_cluster_stats(vo='vo.ai4eosc.eu')
+    stats = get_cluster_stats(vo=VO)
     resources = ['cpu', 'ram', 'disk']
     keys = [f"{i}_used" for i in resources] + [f"{i}_total" for i in resources]
     status = {k: 0 for k in keys}
@@ -89,16 +179,17 @@ def create_deployment(
     # Check that the user hasn't too many "try-me" jobs currently running
     jobs = nomad.get_deployments(
-        namespace="ai4eosc",  # (!) try-me jobs are always deployed in "ai4eosc"
+        namespace=NAMESPACE,
         owner=auth_info['id'],
         prefix="try",
     )
-    if len(jobs) >= 2:
+    if len(jobs) >= 3:
         raise HTTPException(
             status_code=503,
-            detail="Sorry, but you seem to be currently running two `Try-me` environments already. " \
+            detail="Sorry, but you seem to be currently running three `try-me` environments already. " \
                 "Before launching a new one, you will need to wait till one of your " \
-                "existing environments gets automatically deleted (ca. 10 min)."
+                "existing environments gets automatically deleted (ca. 10 min) or delete one manually " \
+                "in the Dashboard."
         )
 
     # Submit job
@@ -107,32 +198,28 @@ def create_deployment(
     return r
 
 
-@router.get("/{deployment_uuid}")
-def get_deployment(
+@router.delete("/{deployment_uuid}")
+def delete_deployment(
     deployment_uuid: str,
     authorization=Depends(security),
     ):
     """
-    This function is used mainly to be able to retrieve the endpoint of the try_me job.
-    We cannot return the endpoint when creating the job, because the final endpoint will
-    on which datacenter the job ends up landing.
+    Delete a deployment. Users can only delete their own deployments.
 
     Parameters:
-    * **deployment_uuid**: uuid of deployment to gather info about
+    * **deployment_uuid**: uuid of deployment to delete
 
-    Returns a dict with info
+    Returns a dict with status
     """
     # Retrieve authenticated user info
     auth_info = auth.get_user_info(token=authorization.credentials)
 
-    job = nomad.get_deployment(
+    # Delete deployment
+    r = nomad.delete_deployment(
         deployment_uuid=deployment_uuid,
-        namespace="ai4eosc",  # (!) try-me jobs are always deployed in "ai4eosc"
+        namespace=NAMESPACE,
         owner=auth_info['id'],
-        full_info=True,
     )
 
-    # Rewrite main endpoint, otherwise it automatically selects DEEPaaS API
-    job['main_endpoint'] = 'ui'
-
-    return job
+    return r
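The try-me API now covers the full lifecycle (create, list, inspect, delete), matching the route assertions added in `tests/routes.py` below. A hedged client-side usage sketch with `requests`; the base URL is an assumption (substitute your own PAPI instance) and `<TOKEN>` stands for a valid OIDC access token:

```python
# Illustrative usage of the try-me endpoints; BASE is a hypothetical instance URL.
import requests

BASE = "https://api.cloud.ai4eosc.eu/v1/try_me/nomad"  # hypothetical
HEADERS = {"Authorization": "Bearer <TOKEN>"}

# Create a try-me job; `title` is the new optional query parameter
r = requests.post(
    BASE,
    params={"module_name": "ai4os-demo-app", "title": "demo"},
    headers=HEADERS,
)
job_id = r.json()["job_ID"]

requests.get(BASE, headers=HEADERS)                   # list your try-me jobs
requests.get(f"{BASE}/{job_id}", headers=HEADERS)     # inspect one (endpoint info)
requests.delete(f"{BASE}/{job_id}", headers=HEADERS)  # delete it
```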
diff --git a/ai4papi/utils.py b/ai4papi/utils.py
index dbf52cb..20ba25f 100644
--- a/ai4papi/utils.py
+++ b/ai4papi/utils.py
@@ -19,105 +19,6 @@
 github_token = os.environ.get('PAPI_GITHUB_TOKEN', None)
 
 
-def safe_hostname(
-    hostname: str,
-    job_uuid: str,
-    ):
-
-    if hostname:  # user provided a hostname
-
-        # Forbid some hostnames to avoid confusions
-        if hostname.startswith('www') or hostname.startswith('http'):
-            raise HTTPException(
-                status_code=400,
-                detail="Hostname should not start with `www` or `http`."
-            )
-
-        # Replace all non-alphanumerical characters from hostname with hyphens
-        # to make it url safe
-        hostname = re.sub('[^0-9a-zA-Z]', '-', hostname)
-
-        # Check url safety
-        if hostname.startswith('-'):
-            raise HTTPException(
-                status_code=400,
-                detail="Hostname should start with alphanumerical character."
-            )
-        if hostname.endswith('-'):
-            raise HTTPException(
-                status_code=400,
-                detail="Hostname should end with alphanumerical character."
-            )
-        if len(hostname) > 40:
-            raise HTTPException(
-                status_code=400,
-                detail="Hostname should be shorter than 40 characters."
-            )
-
-        return hostname
-
-    else:  # we use job_ID as default hostname
-        return job_uuid
-
-
-def check_domain(base_url):
-    """
-    Check if the domain is free so that we let user deploy to it.
-    We have to check all possible services that could be hosted in that domain.
-
-    Parameters:
-    * **base_url**
-
-    Returns None if the checks pass, otherwise raises an Exception.
-    """
-    s_names = [  # all possible services
-        'deepaas',
-        'ide',
-        'monitor',
-        'fedserver',
-    ]
-    s_urls = [f"http://{name}-{base_url}" for name in s_names]
-
-    for url in s_urls:
-        # First check if the URL is reachable
-        try:
-            r = session.get(url, timeout=5)
-        except requests.exceptions.ConnectionError:
-            # URL was not reachable therefore assumed empty
-            continue
-        except Exception:
-            # Other exception happened
-            raise HTTPException(
-                status_code=401,
-                detail=f"We had troubles reaching {url}. Make sure it is a valid domain, "\
-                    "otherwise contact with support."
-            )
-
-        # Domain still might be available if the error is a 404 coming **from Traefik**.
-        # We have to check that the error 404 thrown by Traefik and not by some other
-        # application. We do this by checking the headers.
-        # This is a hacky fix for a limitation in Traefik:
-        # https://github.com/traefik/traefik/issues/8141#issuecomment-844548035
-        if r.status_code == 404:
-            traefik_headers = {'Content-Type', 'X-Content-Type-Options', 'Date', 'Content-Length'}
-            headers = set(dict(r.headers).keys())
-            xcontent = r.headers.get('X-Content-Type-Options', None)
-            lcontent = r.headers.get('Content-Length', None)
-
-            if (headers == traefik_headers) and \
-               (xcontent == 'nosniff') and \
-               (lcontent == '19'):
-                continue
-
-        # In every other case, the URL is already in use.
-        raise HTTPException(
-            status_code=401,
-            detail=f"The domain {url} seems to be taken. Please try again with a new domain or leave the field empty."
-        )
-
-    return None
-
-
 def update_values_conf(submitted, reference):
     """
     Update the reference YAML values configuration with a user submitted ones.
@@ -168,13 +69,14 @@ def validate_conf(conf):
 
     if datasets:
         for d in datasets:
 
-            # Validate DOI
+            # Validate DOI and URL
             # ref: https://stackoverflow.com/a/48524047/18471590
-            pattern = r"^10.\d{4,9}/[-._;()/:A-Z0-9]+$"
-            if not re.match(pattern, d['doi'], re.IGNORECASE):
+            doi_pattern = r"^10.\d{4,9}/[-._;()/:A-Z0-9]+$"
+            url_pattern = r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)"
+            if not (re.match(doi_pattern, d['doi'], re.IGNORECASE) or
+                    re.match(url_pattern, d['doi'], re.IGNORECASE)):
                 raise HTTPException(
                     status_code=400,
-                    detail="Invalid DOI."
+                    detail="Invalid DOI or URL."
                 )
 
             # Check force pull parameter
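The dataset reference check in `validate_conf` now accepts either a DOI or a plain URL. A standalone sketch of the combined check, with the patterns copied from the hunk above (the function name and test values are illustrative):

```python
# Standalone version of the combined DOI/URL validation above.
import re

DOI_PATTERN = r"^10.\d{4,9}/[-._;()/:A-Z0-9]+$"
URL_PATTERN = r"https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*)"

def is_valid_reference(ref: str) -> bool:
    """Return True if `ref` looks like a DOI or a URL."""
    return bool(
        re.match(DOI_PATTERN, ref, re.IGNORECASE)
        or re.match(URL_PATTERN, ref, re.IGNORECASE)
    )

assert is_valid_reference("10.5281/zenodo.1234567")           # DOI
assert is_valid_reference("https://zenodo.org/records/1234")  # URL (newly accepted)
assert not is_valid_reference("not-a-doi")
```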
diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml
index 432e32c..4829115 100644
--- a/docker/docker-compose.yml
+++ b/docker/docker-compose.yml
@@ -8,24 +8,28 @@ services:
     environment:
       - NOMAD_ADDR=https://193.146.75.205:4646
       - ACCOUNTING_PTH=/home/ai4-accounting
-      - ZENODO_TOKEN=*************************
+      - ZENODO_TOKEN=*****************************
+      - PAPI_GITHUB_TOKEN=************************
+      - MAILING_TOKEN=****************************
+      - HARBOR_ROBOT_PASSWORD=********************
     volumes:
-      - /home/ubuntu/nomad-certs/nomad-prod:/home/nomad-certs
-      - /mnt/ai4os-logs/ai4-accounting:/home/ai4-accounting
+      - /home/ubuntu/nomad-certs/nomad-federated:/home/nomad-certs
+      - /home/ubuntu/ai4-accounting:/home/ai4-accounting
     ports:
       - 8080:80
 
   ai4-papi-dev:
-    # Right now dev is pointing to same services as prod.
-    # Will change in the future.
-    image: "registry.services.ai4os.eu/ai4os/ai4-papi:prod"
+    image: "registry.services.ai4os.eu/ai4os/ai4-papi:dev"
     restart: always
     environment:
       - NOMAD_ADDR=https://193.146.75.205:4646
       - ACCOUNTING_PTH=/home/ai4-accounting
-      - ZENODO_TOKEN=*************************
+      - ZENODO_TOKEN=*****************************
+      - PAPI_GITHUB_TOKEN=************************
+      - MAILING_TOKEN=****************************
+      - HARBOR_ROBOT_PASSWORD=********************
     volumes:
-      - /home/ubuntu/nomad-certs/nomad-prod:/home/nomad-certs
-      - /mnt/ai4os-logs/ai4-accounting:/home/ai4-accounting
+      - /home/ubuntu/nomad-certs/nomad-federated:/home/nomad-certs
+      - /home/ubuntu/ai4-accounting:/home/ai4-accounting
     ports:
      - 8081:80
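The compose files now pass several service tokens into the containers. On the Python side these are read from the environment at import time; `PAPI_GITHUB_TOKEN` mirrors the exact pattern already visible in `ai4papi/utils.py` above, while the other two lookups are assumptions named after the new compose entries:

```python
# Hedged sketch of how the new compose env vars are (or would be) consumed.
import os

github_token = os.environ.get('PAPI_GITHUB_TOKEN', None)      # as in ai4papi/utils.py
mailing_token = os.environ.get('MAILING_TOKEN', None)         # assumed analogous
harbor_password = os.environ.get('HARBOR_ROBOT_PASSWORD', None)  # assumed analogous

if github_token is None:
    print('Warning: PAPI_GITHUB_TOKEN not set; GitHub-related features may be disabled.')
```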
diff --git a/etc/modules/user.yaml b/etc/modules/user.yaml
index d4cafba..2f6a6b5 100644
--- a/etc/modules/user.yaml
+++ b/etc/modules/user.yaml
@@ -20,7 +20,6 @@
 # - 127 GB RAM --> ~57 GB / gpu (reserving 10% for the node)
 
 general:
-
   title:
     name: Deployment title
     value: ''
@@ -31,11 +30,6 @@ general:
     value: ''
     description: Provide some additional extended information about this deployment.
 
-  hostname:
-    name: Custom domain
-    value: ''
-    description: Host domain where your application will be deployed. Selecting `myapp` will make your deployment accessible at `http://myapp.deployments.etc`). If not provided, hostname will automatically default to your `job-uuid`.
-
   docker_image:
     name: Docker image
     value: 'deephdc/deep-oc-image-classification-tf'
@@ -59,7 +53,6 @@ general:
     description: Select a password for your IDE (JupyterLab or VS Code). It should have at least 9 characters.
 
 hardware:
-
   cpu_num:
     name: Number of CPUs
     value: 4
@@ -87,7 +80,6 @@ hardware:
     range: [1000, 50000]
 
 storage:
-
   rclone_conf:
     name: RCLONE configuration
     value: '/srv/.rclone/rclone.conf'
@@ -95,8 +87,8 @@ storage:
 
   rclone_url:
     name: Storage URL
-    value: 'https://share.services.ai4os.eu/remote.php/webdav/'
-    description: Remote storage link to be accessed via rclone (webdav)
+    value: ''
+    description: Remote storage link to be accessed via rclone (webdav). For example, in Nextcloud: `https://share.services.ai4os.eu/remote.php/dav/files/`.
 
   rclone_vendor:
     name: RCLONE vendor
diff --git a/etc/tools/ai4os-federated-server/nomad.hcl b/etc/tools/ai4os-federated-server/nomad.hcl
index 1ae7322..f29950c 100644
--- a/etc/tools/ai4os-federated-server/nomad.hcl
+++ b/etc/tools/ai4os-federated-server/nomad.hcl
@@ -155,6 +155,10 @@ job "tool-fl-${JOB_UUID}" {
         MU_FEDPROX = "${MU_FEDPROX}"
         FEDAVGM_SERVER_FL = "${FEDAVGM_SERVER_FL}"
         FEDAVGM_SERVER_MOMENTUM = "${FEDAVGM_SERVER_MOMENTUM}"
+        DP = "${DP}"
+        NOISE_MULT = "${NOISE_MULT}"
+        SAMPLED_CLIENTS = "${SAMPLED_CLIENTS}"
+        CLIP_NORM = "${CLIP_NORM}"
       }
 
       resources {
diff --git a/etc/tools/ai4os-federated-server/user.yaml b/etc/tools/ai4os-federated-server/user.yaml
index 5882d27..afda82f 100644
--- a/etc/tools/ai4os-federated-server/user.yaml
+++ b/etc/tools/ai4os-federated-server/user.yaml
@@ -20,11 +20,6 @@ general:
     value: ''
     description: Provide some additional extended information about this deployment.
 
-  hostname:
-    name: Hostname
-    value: ''
-    description: Host where your application will be deployed. Selecting `myapp` will make your deployment accessible at `http://myapp.deployments.etc`). If not provided, hostname will automatically default to your `job-uuid`.
-
   docker_image:
     name: Docker image
     value: 'ai4oshub/ai4os-federated-server'
@@ -118,3 +113,26 @@ configuration:
     value: 0
     range: [0, null]
     description: In the FedAvgM strategy, server-side momentum factor.
+
+  dp:
+    name: Differential privacy
+    value: False
+    options: [True, False]
+
+  noise_mult:
+    name: Noise multiplier
+    value: 1
+    range: [0.00001, null]
+    description: Noise multiplier for the Gaussian mechanism.
+
+  sampled_clients:
+    name: Number of sampled clients
+    value: 2
+    range: [2, null]
+    description: Number of clients sampled in each FL round.
+
+  clip_norm:
+    name: Clipping norm
+    value: 0.1
+    range: [0.1, null]
+    description: Threshold value for clipping.
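For reference, each new `configuration` field declares a `value` together with a `range: [min, max]` pair (`null` meaning unbounded) that the API is expected to enforce before rendering the job. A hedged sketch of that kind of range check; PAPI's actual validation lives in `ai4papi/utils.py` and related modules, so the helper below is illustrative only:

```python
# Illustrative range check for the new differential-privacy fields; mirrors the
# `range: [min, max]` declarations above, with None meaning "unbounded".
dp_conf = {'dp': True, 'noise_mult': 1.0, 'sampled_clients': 2, 'clip_norm': 0.1}
dp_ranges = {
    'noise_mult': (0.00001, None),
    'sampled_clients': (2, None),
    'clip_norm': (0.1, None),
}

for key, (lo, hi) in dp_ranges.items():
    value = dp_conf[key]
    if lo is not None and value < lo:
        raise ValueError(f"{key}={value} is below the minimum of {lo}")
    if hi is not None and value > hi:
        raise ValueError(f"{key}={value} is above the maximum of {hi}")
```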
diff --git a/etc/try_me/nomad.hcl b/etc/try_me/nomad.hcl
index d11b580..e829962 100644
--- a/etc/try_me/nomad.hcl
+++ b/etc/try_me/nomad.hcl
@@ -20,7 +20,7 @@ job "try-${JOB_UUID}" {
     owner       = "${OWNER}"  # user-id from OIDC
     owner_name  = "${OWNER_NAME}"
     owner_email = "${OWNER_EMAIL}"
-    title       = ""
+    title       = "${TITLE}"
     description = ""
   }
 
diff --git a/tests/main.py b/tests/main.py
index 0383b68..a676bc2 100644
--- a/tests/main.py
+++ b/tests/main.py
@@ -21,6 +21,7 @@
 import catalog.tools
 import deployments.modules
 import deployments.tools
+import try_me.test_nomad
 import routes
 import test_secrets
 import test_stats
diff --git a/tests/routes.py b/tests/routes.py
index ade0fb5..456cbdd 100644
--- a/tests/routes.py
+++ b/tests/routes.py
@@ -39,6 +39,8 @@
 assert ('/v1/deployments/stats/cluster', {'GET'}) in routes
 
 assert ('/v1/try_me/nomad', {'POST'}) in routes
+assert ('/v1/try_me/nomad', {'GET'}) in routes
 assert ('/v1/try_me/nomad/{deployment_uuid}', {'GET'}) in routes
+assert ('/v1/try_me/nomad/{deployment_uuid}', {'DELETE'}) in routes
 
 print('Checks for API routes passed!')
diff --git a/tests/try_me/test_nomad.py b/tests/try_me/test_nomad.py
new file mode 100644
index 0000000..65d3a07
--- /dev/null
+++ b/tests/try_me/test_nomad.py
@@ -0,0 +1,70 @@
+import os
+import time
+from types import SimpleNamespace
+
+from ai4papi.routers.v1.try_me import nomad
+
+
+# Retrieve EGI token (not generated on the fly, in case there are rate-limiting
+# issues when too many queries are made)
+token = os.getenv('TMP_EGI_TOKEN')
+if not token:
+    raise Exception(
+'Please remember to set a token as an ENV variable before executing \
+the tests! \n\n \
+    export TMP_EGI_TOKEN="$(oidc-token egi-checkin)" \n\n \
+If running from VS Code, make sure to launch `code` from that terminal so it can access \
+that ENV variable.'
+    )
+
+# Create deployment
+rcreate = nomad.create_deployment(
+    module_name="ai4os-demo-app",
+    title="PAPI tests",
+    authorization=SimpleNamespace(
+        credentials=token
+    ),
+)
+assert isinstance(rcreate, dict)
+assert 'job_ID' in rcreate.keys()
+
+# Retrieve that deployment
+rdep = nomad.get_deployment(
+    deployment_uuid=rcreate['job_ID'],
+    authorization=SimpleNamespace(
+        credentials=token
+    ),
+)
+assert isinstance(rdep, dict)
+assert 'job_ID' in rdep.keys()
+assert rdep['job_ID'] == rcreate['job_ID']
+
+# Retrieve all deployments
+rdeps = nomad.get_deployments(
+    authorization=SimpleNamespace(
+        credentials=token
+    ),
+)
+assert isinstance(rdeps, list)
+assert any([d['job_ID'] == rcreate['job_ID'] for d in rdeps])
+
+# Delete deployment
+rdel = nomad.delete_deployment(
+    deployment_uuid=rcreate['job_ID'],
+    authorization=SimpleNamespace(
+        credentials=token
+    ),
+)
+time.sleep(3)  # Nomad takes some time to delete the job
+assert isinstance(rdel, dict)
+assert 'status' in rdel.keys()
+
+# Check the deployment no longer exists
+rdeps3 = nomad.get_deployments(
+    authorization=SimpleNamespace(
+        credentials=token
+    ),
+)
+assert not any([d['job_ID'] == rcreate['job_ID'] for d in rdeps3])
+
+print('Try-me (nomad) tests passed!')
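One caveat on the new test: the fixed `time.sleep(3)` can be flaky if Nomad takes longer than expected to remove the job. A hedged alternative is to poll until the deployment disappears; the helper below is a sketch only and is not part of the test suite:

```python
# Hypothetical polling helper to replace the fixed sleep; `list_jobs` is any
# callable returning the current deployments (e.g. a wrapper around the
# nomad.get_deployments call used in the test).
import time

def wait_until_deleted(job_id, list_jobs, timeout=30, step=2):
    """Poll `list_jobs()` until `job_id` is gone; return True if it disappeared."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if not any(d['job_ID'] == job_id for d in list_jobs()):
            return True
        time.sleep(step)
    return False
```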