diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 244f0c13d..0373b4747 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -96,6 +96,7 @@ jobs: - "oist" - "pasteur" - "pawsey_nimbus" + - "pdc_kth" - "phoenix" - "prince" - "psmn" diff --git a/README.md b/README.md index 9ba844863..a34efddc3 100644 --- a/README.md +++ b/README.md @@ -152,6 +152,7 @@ Currently documentation is available for the following systems: - [OIST](docs/oist.md) - [PASTEUR](docs/pasteur.md) - [PAWSEY NIMBUS](docs/pawsey_nimbus.md) +- [PDC](docs/pdc_kth.md) - [PHOENIX](docs/phoenix.md) - [PRINCE](docs/prince.md) - [PSMN](docs/psmn.md) diff --git a/conf/pdc_kth.config b/conf/pdc_kth.config new file mode 100644 index 000000000..a8269a396 --- /dev/null +++ b/conf/pdc_kth.config @@ -0,0 +1,72 @@ +// Nextflow config for use with PDC at KTH + +def cluster = "unknown" + +try { + cluster = ['/bin/bash', '-c', 'sacctmgr show cluster -n | grep -o "^\s*[^ ]*\s*"'].execute().text.trim() +} catch (java.io.IOException e) { + System.err.println("WARNING: Could not run sacctmgr, defaulting to unknown") +} + +params { + config_profile_description = 'PDC profile.' 
+ config_profile_contact = 'Pontus Freyhult (@pontus)' + config_profile_url = "https://www.pdc.kth.se/" + + max_memory = 1790.GB + max_cpus = 256 + max_time = 7.d + + schema_ignore_params = "genomes,input_paths,cluster-options,clusterOptions,project,validationSchemaIgnoreParams" + validationSchemaIgnoreParams = "genomes,input_paths,cluster-options,clusterOptions,project,schema_ignore_params" +} + + +def containerOptionsCreator = { + switch(cluster) { + case "dardel": + return '-B /cfs/klemming/' + } + + return '' +} + +def clusterOptionsCreator = { mem,time -> + String base = "-A $params.project ${params.clusterOptions ?: ''}" + + switch(cluster) { + case "dardel": + String extra = '' + + if (mem <= 222.GB) { + if (time<24.d) { + extra += ' -p shared' + } else { + extra += ' -p long' + } + } else { + // Needs more than a thin node + // make an ugly hack to ignore runtime + extra += ' -p main,memory -t 1-0:0' + } + + return base+extra + } + + return base +} + + +singularity { + enabled = true + containerOptions = containerOptionsCreator +} + +process { + // Should we lock these to specific versions? + beforeScript = 'module load PDC singularity' + + executor = 'slurm' + clusterOptions = { clusterOptionsCreator(task.memory, task.time) } +} + diff --git a/docs/pdc_kth.md b/docs/pdc_kth.md new file mode 100644 index 000000000..74093defe --- /dev/null +++ b/docs/pdc_kth.md @@ -0,0 +1,47 @@ +# nf-core/configs: PDC Configuration + +nf-core pipelines have been successfully configured for use on the PDC +cluster dardel. No other clusters have yet been tested, but support can be +added if needed. + +## Getting started + +The base java installation on dardel is Java 11. By loading the `PDC` +and `Java` module, different versions (e.g. 17) are available. + +To pull new singularity images, singularity must be available +(e.g. 
through the module system) to the nextflow monitoring process; +the suggested preparatory work before launching nextflow is: + +```shell + +module load PDC Java singularity +``` + +(for reproducibility, it may be a good idea to check what versions you +have loaded with `module list` and using those afterwards, e.g. +`module load PDC/22.06 singularity/3.10.4-cpeGNU-22.06 Java/17.0.4`.) + +No singularity images or nextflow versions are currently preloaded on +dardel; to get started you can e.g. download nextflow through + +```shell +wget https://raw.githubusercontent.com/nextflow-io/nextflow/master/nextflow && \ + chmod a+x nextflow +``` + +The profile `pdc_kth` has been provided for convenience; it expects you to +pass the project used for slurm accounting through `--project`, e.g. +`--project=nais2023-22-1027`. + +Due to [how partitions are set +up](https://www.pdc.kth.se/support/documents/run_jobs/job_scheduling.html#dardel-partitions) +on dardel, in particular the lack of long-runtime nodes with more +memory, some runs may be difficult to get through. + +Note that node local scratch is not available and `SNIC_TMP` as well +as `PDC_TMP` point to a cluster-scratch area that will have similar +performance characteristics as your project storage. `/tmp` points to a +local `tmpfs` which uses RAM to store contents. Given that nodes don't +have swap space, anything stored in `/tmp` will mean less memory is +available for your job. 
diff --git a/nfcore_custom.config b/nfcore_custom.config index a3cd5c905..e57871607 100644 --- a/nfcore_custom.config +++ b/nfcore_custom.config @@ -78,6 +78,7 @@ profiles { oist { includeConfig "${params.custom_config_base}/conf/oist.config" } pasteur { includeConfig "${params.custom_config_base}/conf/pasteur.config" } pawsey_nimbus { includeConfig "${params.custom_config_base}/conf/pawsey_nimbus.config" } + pdc_kth { includeConfig "${params.custom_config_base}/conf/pdc_kth.config" } phoenix { includeConfig "${params.custom_config_base}/conf/phoenix.config" } prince { includeConfig "${params.custom_config_base}/conf/prince.config" } psmn { includeConfig "${params.custom_config_base}/conf/psmn.config" }