diff --git a/404.html b/404.html index f611f22da..66deba8f0 100644 --- a/404.html +++ b/404.html @@ -1335,7 +1335,7 @@
  • - + diff --git a/adding_software/building_software/index.html b/adding_software/building_software/index.html index 59f954a90..7adcb811a 100644 --- a/adding_software/building_software/index.html +++ b/adding_software/building_software/index.html @@ -1459,7 +1459,7 @@
  • - + diff --git a/adding_software/contribution_policy/index.html b/adding_software/contribution_policy/index.html index f1e5837bb..70da4f649 100644 --- a/adding_software/contribution_policy/index.html +++ b/adding_software/contribution_policy/index.html @@ -1510,7 +1510,7 @@
  • - + diff --git a/adding_software/debugging_failed_builds/index.html b/adding_software/debugging_failed_builds/index.html index 2fefaa1f4..3b2efb3e4 100644 --- a/adding_software/debugging_failed_builds/index.html +++ b/adding_software/debugging_failed_builds/index.html @@ -1552,7 +1552,7 @@
  • - + diff --git a/adding_software/deploying_software/index.html b/adding_software/deploying_software/index.html index 70f0c366f..bb4dd81c6 100644 --- a/adding_software/deploying_software/index.html +++ b/adding_software/deploying_software/index.html @@ -1417,7 +1417,7 @@
  • - + diff --git a/adding_software/opening_pr/index.html b/adding_software/opening_pr/index.html index 294516f46..250d6fee0 100644 --- a/adding_software/opening_pr/index.html +++ b/adding_software/opening_pr/index.html @@ -1408,7 +1408,7 @@
  • - + diff --git a/adding_software/overview/index.html b/adding_software/overview/index.html index 424b91a41..1eef71109 100644 --- a/adding_software/overview/index.html +++ b/adding_software/overview/index.html @@ -1406,7 +1406,7 @@
  • - + diff --git a/blog/2024/05/17/isc24/index.html b/blog/2024/05/17/isc24/index.html index 75b12072d..8e7911d9d 100644 --- a/blog/2024/05/17/isc24/index.html +++ b/blog/2024/05/17/isc24/index.html @@ -1344,7 +1344,7 @@
  • - + diff --git a/blog/archive/2024/index.html b/blog/archive/2024/index.html index e2348cd1d..307f7bc7c 100644 --- a/blog/archive/2024/index.html +++ b/blog/archive/2024/index.html @@ -1342,7 +1342,7 @@
  • - + diff --git a/blog/index.html b/blog/index.html index a71b0492b..90ae283e0 100644 --- a/blog/index.html +++ b/blog/index.html @@ -1344,7 +1344,7 @@
  • - + diff --git a/bot/index.html b/bot/index.html index d2486e2d2..580a525d1 100644 --- a/bot/index.html +++ b/bot/index.html @@ -1570,7 +1570,7 @@
  • - + diff --git a/compatibility_layer/index.html b/compatibility_layer/index.html index 09be7781f..8cc6bba0e 100644 --- a/compatibility_layer/index.html +++ b/compatibility_layer/index.html @@ -1358,7 +1358,7 @@
  • - + diff --git a/contact/index.html b/contact/index.html index a0824f7e2..43c39cced 100644 --- a/contact/index.html +++ b/contact/index.html @@ -1346,7 +1346,7 @@
  • - + diff --git a/filesystem_layer/index.html b/filesystem_layer/index.html index c9cf09d7c..82cc734e6 100644 --- a/filesystem_layer/index.html +++ b/filesystem_layer/index.html @@ -1406,7 +1406,7 @@
  • - + diff --git a/filesystem_layer/stratum1/index.html b/filesystem_layer/stratum1/index.html index a24a102bb..1550c4374 100644 --- a/filesystem_layer/stratum1/index.html +++ b/filesystem_layer/stratum1/index.html @@ -1442,7 +1442,7 @@
  • - + diff --git a/getting_access/eessi_container/index.html b/getting_access/eessi_container/index.html index c505ef4d8..f0a43ade8 100644 --- a/getting_access/eessi_container/index.html +++ b/getting_access/eessi_container/index.html @@ -1487,7 +1487,7 @@
  • - + diff --git a/getting_access/is_eessi_accessible/index.html b/getting_access/is_eessi_accessible/index.html index 67c864e6a..650d529b9 100644 --- a/getting_access/is_eessi_accessible/index.html +++ b/getting_access/is_eessi_accessible/index.html @@ -1358,7 +1358,7 @@
  • - + diff --git a/getting_access/native_installation/index.html b/getting_access/native_installation/index.html index e6d73958f..5301e03fe 100644 --- a/getting_access/native_installation/index.html +++ b/getting_access/native_installation/index.html @@ -1358,7 +1358,7 @@
  • - + diff --git a/gpu/index.html b/gpu/index.html index cc4fa3af9..d05605472 100644 --- a/gpu/index.html +++ b/gpu/index.html @@ -1498,7 +1498,7 @@
  • - + diff --git a/index.html b/index.html index 863ef3220..34aaa9c78 100644 --- a/index.html +++ b/index.html @@ -1393,7 +1393,7 @@
  • - + diff --git a/known_issues/eessi-2023.06/index.html b/known_issues/eessi-2023.06/index.html new file mode 100644 index 000000000..80e248bfa --- /dev/null +++ b/known_issues/eessi-2023.06/index.html @@ -0,0 +1,1809 @@ + + + + + + + + + + + + + + + + + + + + + + + + + v2023.06 - European Environment for Scientific Software Installations (EESSI) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Known issues

    +

    EESSI Production Repository (v2023.06)

    +

    Failed to modify UD QP to INIT on mlx5_0: Operation not permitted

    +
    + +

    This is an error that occurs with OpenMPI after updating to OFED 23.10.

    + +

There is an upstream issue about this problem opened with EasyBuild; see https://github.com/easybuilders/easybuild-easyconfigs/issues/20233

Workarounds

You can instruct OpenMPI not to use libfabric and to turn off `uct` (see https://openucx.readthedocs.io/en/master/running.html#running-mpi) by passing the following options to `mpirun`:

    + +
    mpirun -mca pml ucx -mca btl '^uct,ofi' -mca mtl '^ofi'
    +
Or equivalently, you can set the following environment variables:
    export OMPI_MCA_btl='^uct,ofi'
    +export OMPI_MCA_pml='ucx'
    +export OMPI_MCA_mtl='^ofi'
    +
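
For example, in a Slurm batch job you could export these variables before launching your application; a minimal sketch, assuming a hypothetical executable name (my_mpi_app):

#!/bin/bash
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=4

# work around the OFED 23.10 issue by disabling the ofi MTL and the uct/ofi BTLs
export OMPI_MCA_btl='^uct,ofi'
export OMPI_MCA_pml='ucx'
export OMPI_MCA_mtl='^ofi'

mpirun ./my_mpi_app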
    +
    + + + + + + + + + + + + \ No newline at end of file diff --git a/meetings/2022-09-amsterdam/index.html b/meetings/2022-09-amsterdam/index.html index 4958c0a47..9b70ad9ae 100644 --- a/meetings/2022-09-amsterdam/index.html +++ b/meetings/2022-09-amsterdam/index.html @@ -1342,7 +1342,7 @@
  • - + diff --git a/meetings/index.html b/meetings/index.html index 3a0a049c1..200b6b283 100644 --- a/meetings/index.html +++ b/meetings/index.html @@ -1346,7 +1346,7 @@
  • - + diff --git a/overview/index.html b/overview/index.html index 2140297e7..4f8822841 100644 --- a/overview/index.html +++ b/overview/index.html @@ -1442,7 +1442,7 @@
  • - + diff --git a/partners/index.html b/partners/index.html index 565eb298c..549df03de 100644 --- a/partners/index.html +++ b/partners/index.html @@ -1342,7 +1342,7 @@
  • - + diff --git a/repositories/pilot/index.html b/repositories/pilot/index.html index 2393ab6b1..8f4d60b69 100644 --- a/repositories/pilot/index.html +++ b/repositories/pilot/index.html @@ -1539,7 +1539,7 @@
  • - + diff --git a/repositories/riscv.eessi.io/index.html b/repositories/riscv.eessi.io/index.html index 598266937..c40359b82 100644 --- a/repositories/riscv.eessi.io/index.html +++ b/repositories/riscv.eessi.io/index.html @@ -1417,7 +1417,7 @@
  • - + diff --git a/repositories/software.eessi.io/index.html b/repositories/software.eessi.io/index.html index 6fd8bbdf0..1e12baef3 100644 --- a/repositories/software.eessi.io/index.html +++ b/repositories/software.eessi.io/index.html @@ -1450,7 +1450,7 @@
  • - + diff --git a/search/search_index.json b/search/search_index.json index 19fd95092..48a3cad37 100644 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Welcome to the EESSI project documentation!","text":"

    Quote

    What if there was a way to avoid having to install a broad range of scientific software from scratch on every HPC cluster or cloud instance you use or maintain, without compromising on performance?

The European Environment for Scientific Software Installations (EESSI, pronounced as \"easy\") is a collaboration between different European partners in the HPC community. The goal of this project is to build a common stack of scientific software installations for HPC systems and beyond, including laptops, personal workstations and cloud infrastructure.

    "},{"location":"#quick-links","title":"Quick links","text":"
    • What is EESSI?
    • Contact info

    For users:

    • software.eessi.io repository
    • Access, initialize and use EESSI
    • How to run EESSI test suite
    • Get help or report issue

    For system administrators:

    • EESSI layered structure: filesystem, compatibility, software
    • Installing EESSI
    • Setting up a mirror server

    For contributors:

    • Adding software to EESSI
    • Meetings

    The EESSI project was covered during a quick AWS HPC Tech Short video (15 June 2023):

    "},{"location":"bot/","title":"Build-test-deploy bot","text":"

    Building, testing, and deploying software is done by one or more bot instances.

    The EESSI build-test-deploy bot is implemented as a GitHub App in the eessi-bot-software-layer repository.

    It operates in the context of pull requests to the compatibility-layer repository or the software-layer repository, and follows the instructions supplied by humans, so the procedure of adding software to EESSI is semi-automatic.

    It leverages the scripts provided in the bot/ subdirectory of the target repository (see for example here), like bot/build.sh to build software, and bot/check-result.sh to check whether the software was built correctly.

    "},{"location":"bot/#high-level-design","title":"High-level design","text":"

    The bot consists of two components: the event handler, and the job manager.

    "},{"location":"bot/#event-handler","title":"Event handler","text":"

    The bot event handler is responsible for handling GitHub events for the GitHub repositories it is registered to.

    It is triggered for every event that it receives from GitHub. Most events are ignored, but specific events trigger the bot to take action.

Examples of actionable events are the submission of a comment that starts with bot:, which may specify an instruction for the bot like building software, or the addition of a bot:deploy label (see deploying).

    "},{"location":"bot/#job-manager","title":"Job manager","text":"

The bot job manager is responsible for monitoring the queued and running jobs, and for reporting back when jobs have completed.

    It runs every couple of minutes as a cron job.

    "},{"location":"bot/#basics","title":"Basics","text":"

    Instructions for the bot should always start with bot:.

    To get help from the bot, post a comment with bot: help.

    To make the bot report how it is configured, post a comment with bot: show_config.

    "},{"location":"bot/#permissions","title":"Permissions","text":"

    The bot is configured to only act on instructions issued by specific GitHub accounts.

There are separate configuration options for who is allowed to send instructions to the bot, to trigger building of software, and to deploy software installations into the EESSI repository.

    Note

    Ask for help in the #software-layer-bot channel of the EESSI Slack if needed!

    "},{"location":"bot/#building","title":"Building","text":"

    To instruct the bot to build software, one or more build instructions should be issued by posting a comment in the pull request (see also here).

    The most basic build instruction that can be sent to the bot is:

    bot: build\n

    Warning

    Only use bot: build if you are confident that it is OK to do so.

Most likely, you want to supply one or more filters to avoid having the bot build for all its configurations.

    "},{"location":"bot/#filters","title":"Filters","text":"

    Build instructions can include filters that are applied by each bot instance to determine which builds should be executed, based on:

    • instance: the name of the bot instance, for example instance:aws for the bot instance running in AWS;
    • repository: the target repository, for example eessi-2023.06-software which corresponds to the 2023.06 version of the EESSI software layer;
    • architecture: the name of the CPU microarchitecture, for example x86_64/amd/zen2;

    Note

    Use : as separator to specify a value for a particular filter, do not add spaces after the :.

    The bot recognizes shorthands for the supported filters, so you can use inst:... instead of instance:..., repo:... instead of repository:..., and arch:... instead of architecture:....
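
For example, the following build instruction uses the shorthand filter names (with illustrative values taken from the examples above) and is equivalent to spelling out the full names:

bot: build inst:aws repo:eessi-2023.06-software arch:x86_64/amd/zen2\n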

    "},{"location":"bot/#combining-filters","title":"Combining filters","text":"

    You can combine multiple filters in a single build instruction. Separate filters with a space, order of filters does not matter.

    For example:

    bot: build repo:eessi-hpc.org-2023.06-software arch:x86_64/amd/zen2\n
    "},{"location":"bot/#multiple-build-instructions","title":"Multiple build instructions","text":"

    You can issue multiple build instructions in a single comment, even across multiple bot instances, repositories, and CPU targets. Specify one build instruction per line.

    For example:

    bot: build repo:eessi-hpc.org-2023.06-software arch:x86_64/amd/zen3 inst:aws\nbot: build repo:eessi-hpc.org-2023.06-software arch:aarch64/generic inst:azure\n

    Note

    The bot applies the filters with partial matching, which you can use to combine multiple build instructions into a single one.

    For example, if you only want to build for all aarch64 CPU targets, you can use arch:aarch64 as filter.

    The same applies to the instance and repository filters.
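
For example, a single build instruction that relies on partial matching to cover all aarch64 CPU targets could look like this:

bot: build arch:aarch64\n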

    "},{"location":"bot/#behind-the-scenes","title":"Behind-the-scenes","text":""},{"location":"bot/#processing-build-instructions","title":"Processing build instructions","text":"

    When the bot receives build instructions through a comment in a pull request, they are processed by the event handler component. It will:

    1) Combine its active configuration (instance name, repositories, supported CPU targets) and the build instructions to prepare a list of jobs to submit;

    2) Create a working directory for each job, including a Slurm job script that runs the bot/build.sh script in the context of the changes proposed in the pull request to build the software, and runs bot/check-result.sh script at the end to check whether the build was successful;

3) Submit each prepared job to a worker node that can build for the specified CPU target, and put a hold on it.
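
As an illustrative sketch (assuming a standard Slurm setup on the build cluster), such held jobs can be inspected with squeue:

squeue -u $USER -t PD   # pending jobs; held jobs show a reason such as (JobHeldUser)\n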

    "},{"location":"bot/#managing-build-jobs","title":"Managing build jobs","text":"

    During the next iteration of the job manager, the submitted jobs are released and queued for execution.

The job manager also monitors the running jobs at regular intervals, and reports back in the pull request when a job has completed. It also reports the result (SUCCESS or FAILURE), based on the result of the bot/check-result.sh script.

    "},{"location":"bot/#artefacts","title":"Artefacts","text":"

    If all goes well, each job should produce a tarball as an artefact, which contains the software installations and the corresponding environment module files.

    The message reported by the job manager provides an overview of the contents of the artefact, which was created by the bot/check-result.sh script.

    "},{"location":"bot/#testing","title":"Testing","text":"

    Warning

    The test phase is not implemented yet in the bot.

    We intend to use the EESSI test suite in different OS configurations to verify that the software that was built works as expected.

    "},{"location":"bot/#deploying","title":"Deploying","text":"

    To deploy the artefacts that were obtained in the build phase, you should add the bot: deploy label to the pull request.

    This will trigger the event handler to upload the artefacts for ingestion into the EESSI repository.

    "},{"location":"bot/#behind-the-scenes_1","title":"Behind-the-scenes","text":"

    The current setup for the software-layer repository, is as follows:

    • The bot deploys the artefacts (tarballs) to an S3 bucket in AWS, along with a metadata file, using the eessi-upload-to-staging script;
    • A cron job that runs every couple of minutes on the CernVM-FS Stratum-0 server opens a pull request to the (private) EESSI/staging repository, to move the metadata file for each uploaded tarball from the staged to the approved directory;
    • Once that pull request gets merged, the target is automatically ingested into the EESSI repository by a cron job on the Stratum-0 server, and the metadata file is moved from approved to ingested in the EESSI/staging repository;
    "},{"location":"compatibility_layer/","title":"Compatibility layer","text":"

    The middle layer of the EESSI project is the compatibility layer, which ensures that our scientific software stack is compatible with different client operating systems (different Linux distributions, macOS and even Windows via WSL).

    For this we rely on Gentoo Prefix, by installing a limited set of Gentoo Linux packages in a non-standard location (a \"prefix\"), using Gentoo's package manager Portage.

The compatibility layer is maintained via our https://github.com/EESSI/compatibility-layer GitHub repository.

    "},{"location":"contact/","title":"Contact info","text":"

    For more information:

    • Visit our website
    • Consult our documentation
    • Ask for help at our support portal
    • Join our Slack channel
    • Reach out to one of the project partners
    • Check out our GitHub repositories
    • Follow us on Twitter

    "},{"location":"filesystem_layer/","title":"Filesystem layer","text":""},{"location":"filesystem_layer/#cernvm-file-system-cernvm-fs","title":"CernVM File System (CernVM-FS)","text":"

    The bottom layer of the EESSI project is the filesystem layer, which is responsible for distributing the software stack.

    For this we rely on CernVM-FS (or CVMFS for short), a network file system used to distribute the software to the clients in a fast, reliable and scalable way.

CVMFS was created over 10 years ago specifically for the purpose of globally distributing a large software stack. For the experiments at the Large Hadron Collider, it hosts several hundred million files and directories that are distributed to on the order of a hundred thousand client computers.

    The hierarchical structure with multiple caching layers (Stratum-0, Stratum-1's located at partner sites and local caching proxies) ensures good performance with limited resources. Redundancy is provided by using multiple Stratum-1's at various sites. Since CVMFS is based on the HTTP protocol, the ubiquitous Squid caching proxy can be leveraged to reduce server loads and improve performance at large installations (such as HPC clusters). Clients can easily mount the file system (read-only) via a FUSE (Filesystem in Userspace) module.

    For a (basic) introduction to CernVM-FS, see this presentation.

    Detailed information about how we configure CVMFS is available at https://github.com/EESSI/filesystem-layer.

    "},{"location":"filesystem_layer/#eessi-infrastructure","title":"EESSI infrastructure","text":"

    For both the pilot and production repositories, EESSI hosts a CernVM-FS Stratum 0 and a number of public Stratum 1 servers. Client systems using EESSI by default connect against the public EESSI CernVM-FS Stratum 1 servers. The status of the infrastructure for the pilot repository is displayed at http://status.eessi-infra.org, while for the production repository it is displayed at https://status.eessi.io.

    "},{"location":"gpu/","title":"GPU support","text":"

    More information on the actions that must be performed to ensure that GPU software included in EESSI can use the GPU in your system is available below.

    Please open a support issue if you need help or have questions regarding GPU support.

    Make sure the ${EESSI_VERSION} version placeholder is defined!

    In this page, we use ${EESSI_VERSION} as a placeholder for the version of the EESSI repository, for example:

    /cvmfs/software.eessi.io/versions/${EESSI_VERSION}\n

    Before inspecting paths, or executing any of the specified commands, you should define $EESSI_VERSION first, for example with:

    export EESSI_VERSION=2023.06\n

    "},{"location":"gpu/#nvidia","title":"Support for using NVIDIA GPUs","text":"

    EESSI supports running CUDA-enabled software. All CUDA-enabled modules are marked with the (gpu) feature, which is visible in the output produced by module avail.

    "},{"location":"gpu/#nvidia_drivers","title":"NVIDIA GPU drivers","text":"

    For CUDA-enabled software to run, it needs to be able to find the NVIDIA GPU drivers of the host system. The challenge here is that the NVIDIA GPU drivers are not always in a standard system location, and that we can not install the GPU drivers in EESSI (since they are too closely tied to the client OS and GPU hardware).

    "},{"location":"gpu/#cuda_sdk","title":"Compiling CUDA software","text":"

    An additional requirement is necessary if you want to be able to compile CUDA-enabled software using a CUDA installation included in EESSI. This requires a full CUDA SDK, but the CUDA SDK End User License Agreement (EULA) does not allow for full redistribution. In EESSI, we are (currently) only allowed to redistribute the files needed to run CUDA software.

    Full CUDA SDK only needed to compile CUDA software

    Without a full CUDA SDK on the host system, you will still be able to run CUDA-enabled software from the EESSI stack, you just won't be able to compile additional CUDA software.

    Below, we describe how to make sure that the EESSI software stack can find your NVIDIA GPU drivers and (optionally) full installations of the CUDA SDK.

    "},{"location":"gpu/#host_injections","title":"host_injections variant symlink","text":"

In the EESSI repository, a special directory has been prepared where system administrators can install files that can be picked up by software installations included in EESSI. This gives administrators the ability to influence the behaviour (and capabilities) of the EESSI software stack.

    This special directory is located in /cvmfs/software.eessi.io/host_injections, and it is a CernVM-FS Variant Symlink: a symbolic link for which the target can be controlled by the CernVM-FS client configuration (for more info, see 'Variant Symlinks' in the official CernVM-FS documentation).

    Default target for host_injections variant symlink

    Unless otherwise configured in the CernVM-FS client configuration for the EESSI repository, the host_injections symlink points to /opt/eessi on the client system:

    $ ls -l /cvmfs/software.eessi.io/host_injections\nlrwxrwxrwx 1 cvmfs cvmfs 10 Oct  3 13:51 /cvmfs/software.eessi.io/host_injections -> /opt/eessi\n

As an example, let's imagine that we want to use an architecture-specific location on a shared filesystem as the target for the symlink. This has the advantage that one can make changes under host_injections that affect all nodes which share that CernVM-FS configuration. Configuring this in your CernVM-FS configuration would mean adding the following line in the client configuration file:

    EESSI_HOST_INJECTIONS=/shared_fs/path\n

    Don't forget to reload the CernVM-FS configuration

    After making a change to a CernVM-FS configuration file, you also need to reload the configuration:

    sudo cvmfs_config reload\n

All CUDA-enabled software in EESSI expects the CUDA drivers to be available in a specific subdirectory of this host_injections directory. In addition, installations of the CUDA SDK included in EESSI are stripped down to the files that we are allowed to redistribute; all other files are replaced by symbolic links that point to another specific subdirectory of host_injections. For example:

    $ ls -l /cvmfs/software.eessi.io/versions/2023.06/software/linux/x86_64/amd/zen3/software/CUDA/12.1.1/bin/nvcc\nlrwxrwxrwx 1 cvmfs cvmfs 109 Dec 21 14:49 /cvmfs/software.eessi.io/versions/2023.06/software/linux/x86_64/amd/zen3/software/CUDA/12.1.1/bin/nvcc -> /cvmfs/software.eessi.io/host_injections/2023.06/software/linux/x86_64/amd/zen3/software/CUDA/12.1.1/bin/nvcc\n

    If the corresponding full installation of the CUDA SDK is available there, the CUDA installation included in EESSI can be used to build CUDA software.

    "},{"location":"gpu/#nvidia_eessi_native","title":"Using NVIDIA GPUs via a native EESSI installation","text":"

    Here, we describe the steps to enable GPU support when you have a native EESSI installation on your system.

    Required permissions

To enable GPU support for EESSI on your system, you will typically need to have system administration rights, since you need write permissions on the target directory of the host_injections symlink.

    "},{"location":"gpu/#exposing-nvidia-gpu-drivers","title":"Exposing NVIDIA GPU drivers","text":"

    To install the symlinks to your GPU drivers in host_injections, run the link_nvidia_host_libraries.sh script that is included in EESSI:

    /cvmfs/software.eessi.io/versions/${EESSI_VERSION}/scripts/gpu_support/nvidia/link_nvidia_host_libraries.sh\n

    This script uses ldconfig on your host system to locate your GPU drivers, and creates symbolic links to them in the correct location under host_injections directory. It also stores the CUDA version supported by the driver that the symlinks were created for.
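
As a minimal sketch of what the script relies on, you can query the host's dynamic linker cache yourself (the exact library names on your system may differ):

ldconfig -p | grep -E 'libcuda|libnvidia-ml'   # list NVIDIA driver libraries known to the dynamic linker\n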

    Re-run link_nvidia_host_libraries.sh after NVIDIA GPU driver update

    You should re-run this script every time you update the NVIDIA GPU drivers on the host system.

Note that it is safe to re-run the script even if no driver updates were done: the script should detect that the current version of the drivers has already been symlinked.

    "},{"location":"gpu/#installing-full-cuda-sdk-optional","title":"Installing full CUDA SDK (optional)","text":"

    To install a full CUDA SDK under host_injections, use the install_cuda_host_injections.sh script that is included in EESSI:

    /cvmfs/software.eessi.io/versions/${EESSI_VERSION}/scripts/gpu_support/nvidia/install_cuda_host_injections.sh\n

    For example, to install CUDA 12.1.1 in the directory that the host_injections variant symlink points to, using /tmp/$USER/EESSI as directory to store temporary files:

    /cvmfs/software.eessi.io/versions/${EESSI_VERSION}/scripts/gpu_support/nvidia/install_cuda_host_injections.sh --cuda-version 12.1.1 --temp-dir /tmp/$USER/EESSI --accept-cuda-eula\n
    You should choose the CUDA version you wish to install according to what CUDA versions are included in EESSI; see the output of module avail CUDA/ after setting up your environment for using EESSI.

    You can run /cvmfs/software.eessi.io/scripts/install_cuda_host_injections.sh --help to check all of the options.

    Tip

    This script uses EasyBuild to install the CUDA SDK. For this to work, two requirements need to be satisfied:

    • module load EasyBuild should work (or the eb command is already available in the environment);
    • The version of EasyBuild being used should provide the requested version of the CUDA easyconfig file (in the example case above, that's CUDA-12.1.1.eb).

    You can rely on the EasyBuild installation that is included in EESSI for this.

    Alternatively, you may load an EasyBuild module manually before running the install_cuda_host_injections.sh script to make an eb command available.
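
A minimal sketch of that alternative, assuming an EasyBuild module is available in your environment:

module load EasyBuild\neb --version   # verify that the eb command is now available\n/cvmfs/software.eessi.io/versions/${EESSI_VERSION}/scripts/gpu_support/nvidia/install_cuda_host_injections.sh --help\n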

    "},{"location":"gpu/#nvidia_eessi_container","title":"Using NVIDIA GPUs via EESSI in a container","text":"

    We focus here on the Apptainer/Singularity use case, and have only tested the --nv option to enable access to GPUs from within the container.

    If you are using the EESSI container to access the EESSI software, the procedure for enabling GPU support is slightly different and will be documented here eventually.

    "},{"location":"gpu/#exposing-nvidia-gpu-drivers_1","title":"Exposing NVIDIA GPU drivers","text":"

    When running a container with apptainer or singularity it is not necessary to run the install_cuda_host_injections.sh script since both these tools use $LD_LIBRARY_PATH internally in order to make the host GPU drivers available in the container.

    The only scenario where this would be required is if $LD_LIBRARY_PATH is modified or undefined.
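
As a hedged sketch (the image reference is a placeholder, not an official EESSI container name), a GPU-enabled container session could be started like this:

apptainer shell --nv $MY_CONTAINER_IMAGE   # $MY_CONTAINER_IMAGE is a placeholder for the container image you use\n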

    "},{"location":"gpu/#gpu_cuda_testing","title":"Testing the GPU support","text":"

    The quickest way to test if software installations included in EESSI can access and use your GPU is to run the deviceQuery executable that is part of the CUDA-Samples module:

    module load CUDA-Samples\ndeviceQuery\n
If both commands run successfully, you should see information about your GPU printed to your terminal.

    "},{"location":"meetings/","title":"Meetings","text":""},{"location":"meetings/#monthly-meetings-online","title":"Monthly meetings (online)","text":"

    Online EESSI update meeting, every 1st Thursday of the month at 14:00 CE(S)T.

    More info can be found on the EESSI wiki.

    "},{"location":"meetings/#physical-meetings","title":"Physical meetings","text":"
    • EESSI Community Meeting in Amsterdam (NL), 14-16 Sept 2022
    "},{"location":"meetings/#physical-meetings-archive","title":"Physical meetings (archive)","text":""},{"location":"meetings/#2020","title":"2020","text":"
    • Meeting in Groningen (NL), 16 Jan 2020
    • Meeting in Delft (NL), 5 Mar 2020
    "},{"location":"meetings/#2019","title":"2019","text":"
    • Meeting in Cambridge (UK), 20-21 May 2019
    "},{"location":"overview/","title":"Overview of the EESSI project","text":""},{"location":"overview/#scope-goals","title":"Scope & Goals","text":"

    Through the EESSI project, we want to set up a shared stack of scientific software installations, and by doing so avoid a lot of duplicate work across HPC sites.

    For end users, we want to provide a uniform user experience with respect to available scientific software, regardless of which system they use.

    Our software stack should work on laptops, personal workstations, HPC clusters and in the cloud, which means we will need to support different CPUs, networks, GPUs, and so on. We hope to make this work for any Linux distribution and maybe even macOS and Windows via WSL, and a wide variety of CPU architectures (Intel, AMD, ARM, POWER, RISC-V).

    Of course we want to focus on the performance of the software, but also on automating the workflow for maintaining the software stack, thoroughly testing the installations, and collaborating efficiently.

    "},{"location":"overview/#inspiration","title":"Inspiration","text":"

The EESSI concept is heavily inspired by the Compute Canada software stack, which is a shared software stack used on all 5 major national systems in Canada and a bunch of smaller ones.

    The design of the Compute Canada software stack is discussed in detail in the PEARC'19 paper \"Providing a Unified Software Environment for Canada\u2019s National Advanced Computing Centers\".

    It has also been presented at the 5th EasyBuild User Meetings (slides, recorded talk), and is well documented.

    "},{"location":"overview/#layered-structure","title":"Layered structure","text":"

    The EESSI project consists of 3 layers.

    The bottom layer is the filesystem layer, which is responsible for distributing the software stack across clients.

    The middle layer is a compatibility layer, which ensures that the software stack is compatible with multiple different client operating systems.

    The top layer is the software layer, which contains the actual scientific software applications and their dependencies.

    The host OS still provides a couple of things, like drivers for network and GPU, support for shared filesystems like GPFS and Lustre, a resource manager like Slurm, and so on.

    "},{"location":"overview/#opportunities","title":"Opportunities","text":"

    We hope to collaborate with interested parties across the HPC community, including HPC centres, vendors, consultancy companies and scientific software developers.

    Through our software stack, HPC users can seamlessly hop between sites, since the same software is available everywhere.

We can leverage each other's work with respect to providing tested and properly optimized scientific software installations more efficiently, and provide a platform for easy benchmarking of new systems.

    By working together with the developers of scientific software we can provide vetted installations for the broad HPC community.

    "},{"location":"overview/#challenges","title":"Challenges","text":"

    There are many challenges in an ambitious project like this, including (but probably not limited to):

    • Finding time and manpower to get the software stack set up properly;
• Leveraging system resources like the network interconnect (MPI & co), accelerators (GPUs), ...;
    • Supporting CPU architectures other than x86_64, including ARM, POWER, RISC-V, ...
    • Dealing with licensed software, like Intel tools, MATLAB, ANSYS, ...;
    • Integration with resource managers (Slurm) and vendor provided software (Cray PE);
    • Convincing HPC site admins to adopt EESSI;
    "},{"location":"overview/#current-status","title":"Current status","text":"

    (June 2020)

    We are actively working on the EESSI repository, and are organizing monthly meetings to discuss progress and next steps forward.

    Keep an eye on our GitHub repositories at https://github.com/EESSI and our Twitter feed.

    "},{"location":"partners/","title":"Project partners","text":""},{"location":"partners/#delft-university-of-technology-the-netherlands","title":"Delft University of Technology (The Netherlands)","text":"
    • Robbert Eggermont
    • Koen Mulderij
    "},{"location":"partners/#dell-technologies-europe","title":"Dell Technologies (Europe)","text":"
    • Walther Blom, High Education & Research
    • Jaco van Dijk, Higher Education
    "},{"location":"partners/#eindhoven-university-of-technology","title":"Eindhoven University of Technology","text":"
    • Alain van Hoof, HPC-Lab
    "},{"location":"partners/#ghent-university-belgium","title":"Ghent University (Belgium)","text":"
    • Kenneth Hoste, HPC-UGent
    "},{"location":"partners/#hpcnow-spain","title":"HPCNow! (Spain)","text":"
    • Oriol Mula Valls
    "},{"location":"partners/#julich-supercomputing-centre-germany","title":"J\u00fclich Supercomputing Centre (Germany)","text":"
    • Alan O'Cais
    "},{"location":"partners/#university-of-cambridge-united-kingdom","title":"University of Cambridge (United Kingdom)","text":"
    • Mark Sharpley, Research Computing Services Division
    "},{"location":"partners/#university-of-groningen-the-netherlands","title":"University of Groningen (The Netherlands)","text":"
    • Bob Dr\u00f6ge, Center for Information Technology
    • Henk-Jan Zilverberg, Center for Information Technology
    "},{"location":"partners/#university-of-twente-the-netherlands","title":"University of Twente (The Netherlands)","text":"
    • Geert Jan Laanstra, Electrical Engineering, Mathematics and Computer Science (EEMCS)
    "},{"location":"partners/#university-of-oslo-norway","title":"University of Oslo (Norway)","text":"
    • Terje Kvernes
    "},{"location":"partners/#university-of-bergen-norway","title":"University of Bergen (Norway)","text":"
    • Thomas R\u00f6blitz
    "},{"location":"partners/#vrije-universiteit-amsterdam-the-netherlands","title":"Vrije Universiteit Amsterdam (The Netherlands)","text":"
    • Peter Stol
    "},{"location":"partners/#surf-the-netherlands","title":"SURF (The Netherlands)","text":"
    • Caspar van Leeuwen
    • Marco Verdicchio
    • Bas van der Vlies
    "},{"location":"software_layer/","title":"Software layer","text":"

    The top layer of the EESSI project is the software layer, which provides the actual scientific software installations.

    To install the software we include in our stack, we use EasyBuild, a framework for installing scientific software on HPC systems. These installations are optimized for a particular system architecture (specific CPU and GPU generation).

To access these software installations we provide environment module files and use Lmod, a modern environment modules tool which has been widely adopted in the HPC community in recent years.

    We leverage the archspec Python library to automatically select the best suited part of the software stack for a particular host, based on its system architecture.
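
As an illustration (a minimal sketch, run on the host whose architecture you want to detect), archspec can be queried directly:

python -c 'import archspec.cpu; print(archspec.cpu.host())'   # prints the detected CPU microarchitecture, e.g. zen2\n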

    The software layer is maintained through our https://github.com/EESSI/software-layer GitHub repository.

    "},{"location":"software_testing/","title":"Software testing","text":"

    This page has been replaced with test-suite, update your bookmarks!

    "},{"location":"support/","title":"Getting support for EESSI","text":"

    Thanks to the MultiXscale EuroHPC project we are able to provide support to the users of EESSI.

    The EESSI support portal is hosted in GitLab: https://gitlab.com/eessi/support.

    "},{"location":"support/#open-issue","title":"How to report a problem or ask a question","text":"

We recommend using a GitLab account if you want to get help from the EESSI support team.

    If you have a GitLab account you can submit your problems or questions on EESSI via the issue tracker of the EESSI support portal at https://gitlab.com/eessi/support/-/issues. Please use one of the provided templates (report a problem, software request, question, ...) when creating an issue.

    You can also contact us via our e-mail address support (@) eessi.io, which will automatically create a (private) issue in the EESSI support portal. When you send us an email, please provide us with as much information as possible on your question or problem. You can find an overview of the information that we would like to receive in the README of the EESSI support portal.

    "},{"location":"support/#level-of-support","title":"Level of Support","text":"

We provide support for EESSI according to a \"reasonable effort\" standard. That means we will make a reasonable effort to help you, but we may not have the time to explore every potential cause, and it may not lead to a (quick) solution. You can compare this to the level of support you typically get from other active open source projects.

    Note that the more complete your reported issue is (e.g. description of the error, what you ran, the software environment in which you ran, minimal reproducer, etc.) the bigger the chance is that we can help you with \"reasonable effort\".

    "},{"location":"support/#what-do-we-provide-support-for","title":"What do we provide support for","text":""},{"location":"support/#accessing-and-using-the-eessi-software-stack","title":"Accessing and using the EESSI software stack","text":"

    If you have trouble connecting to the software stack, such as trouble related to installing or configuring CernVM-FS to access the EESSI filesystem layer, or running the software installations included in the EESSI compatibility layer or software layer, please contact us.

    Note that we can only help with problems related to the software installations (getting the software to run, to perform as expected, etc.). We do not provide support for using specific features of the provided software, nor can we fix (known or unknown) bugs in the software included in EESSI. We can only help with diagnosing and fixing problems that are caused by how the software was built and installed in EESSI.

    "},{"location":"support/#software-requests","title":"Software requests","text":"

    We are open to software requests for software that is not included in EESSI yet.

    The quickest way to add additional software to EESSI is by contributing it yourself as a community contribution, please see the documentation on adding software.

    Alternatively, you can send in a request to our support team. Please try to provide as much information on the software as possible: preferably use the issue template (which requires you to log in to GitLab), or make sure to cover the items listed here.

    Be aware that we can only provide software that has an appropriate open source license.

    "},{"location":"support/#eessi-test-suite","title":"EESSI test suite","text":"

    If you are using the EESSI test suite, you can get help via the EESSI support portal.

    "},{"location":"support/#build-and-deploy-bot","title":"Build-and-deploy bot","text":"

    If you are using the EESSI build-and-deploy bot, you can get help via the EESSI support portal.

    "},{"location":"support/#what-do-we-not-provide-support-for","title":"What do we not provide support for","text":"

    Do not contact the EESSI support team to get help with using software that is included in EESSI, unless you think the problems you are seeing are related to how the software was built and installed.

    Please consult the documentation of the software you are using, or contact the developers of the software directly, if you have questions regarding using the software, or if you think you have found a bug.

    Funded by the European Union. This work has received funding from the European High Performance Computing Joint Undertaking (JU) and countries participating in the project under grant agreement No 101093169.

    "},{"location":"talks/","title":"Talks related to EESSI","text":""},{"location":"talks/#2023","title":"2023","text":"
    • Streaming Optimised Scientific Software: an Introduction to EESSI (online tutorial, 5 Dec 2023)
    • Best Practices for CernVM-FS in HPC (online tutorial, 4 Dec 2023)
    • Streaming optimized scientific software installations on any Linux distro with EESSI (PackagingCon 2023, 27 Oct 2023)
    • Making scientific software EESSI - and fast (8-min AWS HPC Tech Short, 15 June 2023)
    "},{"location":"adding_software/building_software/","title":"Building software","text":"

    (for maintainers)

    "},{"location":"adding_software/building_software/#bot_build","title":"Instructing the bot to build","text":"

    Once the pull request is open, you can instruct the bot to build the software by posting a comment.

    For more information, see the building section in the bot documentation.

    Warning

    Permission to trigger building of software must be granted to your GitHub account first!

    See bot permissions for more information.

    "},{"location":"adding_software/building_software/#guidelines","title":"Guidelines","text":"
    • It may be wise to let the bot perform a test build first, rather than letting it build for a wide range of CPU targets.

    • If one of the builds failed, you can let the bot retry that specific build.

    • Make sure that the software has been built correctly for all CPU targets before you deploy!

    "},{"location":"adding_software/building_software/#checking-the-builds","title":"Checking the builds","text":"

If all goes well, you should see SUCCESS for each build, along with a button to get more information about the checks that were performed, and metadata information on the resulting artefact.

    Note

    Make sure the result is what you expect it to be for all builds before you deploy!

    "},{"location":"adding_software/building_software/#failing-builds","title":"Failing builds","text":"

    Warning

    The bot will currently not give you any information on how or why a build is failing.

    Ask for help in the #software-layer channel of the EESSI Slack if needed!

    "},{"location":"adding_software/building_software/#instructing-the-bot-to-deploy","title":"Instructing the bot to deploy","text":"

    To make the bot deploy the successfully built software, you should issue the corresponding instruction to the bot.

    For more information, see the deploying section in the bot documentation.

    Warning

    Permission to trigger deployment of software installations must be granted to your GitHub account first!

    See bot permissions for more information.

    "},{"location":"adding_software/building_software/#merging-the-pull-request","title":"Merging the pull request","text":"

    You should be able to verify in the pull request that the ingestion has been done, since the CI should fail initially to indicate that some software installations listed in your modified easystack are missing.

Once the ingestion has been done, simply re-triggering the CI workflow should be sufficient to make it pass, and then the pull request can be merged.

    Note

    This assumes that the easystack file being modified is considered by the CI workflow file (.github/workflows/test_eessi.yml) that checks for missing installations, in the correct branch (for example 2023.06) of the software-layer.

    If that's not the case yet, update this workflow in your pull request as well to add the missing easystack file!

    Warning

    You need permissions to re-trigger CI workflows and merge pull requests in the software-layer repository.

    Ask for help in the #software-layer channel of the EESSI Slack if needed!

    "},{"location":"adding_software/building_software/#getting-help","title":"Getting help","text":"

    If you have any questions, or if you need help with something, don't hesitate to contact us via the #software-layer channel of the EESSI Slack.

    "},{"location":"adding_software/contribution_policy/","title":"Contribution policy","text":"

    (version v0.1.0 - updated 9 Nov 2023)

    Note

    This policy is subject to change, please check back regularly.

    "},{"location":"adding_software/contribution_policy/#purpose","title":"Purpose","text":"

    The purpose of this contribution policy is to provide guidelines for adding software to EESSI.

    It informs about what requirements must be met in order for software to be eligible for inclusion in the EESSI software layer.

    "},{"location":"adding_software/contribution_policy/#requirements","title":"Requirements","text":"

    The following requirements must be taken into account when adding software to EESSI.

    Note that additional restrictions may apply in specific cases that are currently not covered explicitly by this policy.

    "},{"location":"adding_software/contribution_policy/#freely_redistributable_software","title":"i) Freely redistributable software","text":"

    Only freely redistributable software can be added to the EESSI repository, and we strongly prefer including only open source software in EESSI.

    Make sure that you are aware of the relevant software licenses, and that redistribution of the software you want to add to EESSI is allowed.

    For more information about a specific software license, see the SPDX license list.

    Note

    We intend to automatically verify that this requirement is met, by requiring that the SPDX license identifier is provided for all software included in EESSI.

    "},{"location":"adding_software/contribution_policy/#built_by_bot","title":"ii) Built by the bot","text":"

All software included in the EESSI repository must be built autonomously by our bot.

    For more information, see our semi-automatic software installation procedure.

    "},{"location":"adding_software/contribution_policy/#easybuild","title":"iii) Built and installed with EasyBuild","text":"

    We currently require that all software installations in EESSI are built and installed using EasyBuild.

    We strongly prefer that the latest release of EasyBuild that is available at the time is used to add software to EESSI.

    The use of --from-pr and --include-easyblocks-from-pr to pull in changes to EasyBuild that are required to make the installation work correctly in EESSI is allowed, but only if that is strictly required (that is, if those changes are not included yet in the latest EasyBuild release).

    "},{"location":"adding_software/contribution_policy/#supported_toolchain","title":"iv) Supported compiler toolchain","text":"

    A compiler toolchain that is still supported by the latest EasyBuild release must be used for building the software.

    For more information on supported toolchains, see the EasyBuild toolchain support policy.

    "},{"location":"adding_software/contribution_policy/#recent_toolchains","title":"v) Recent toolchain versions","text":"

    We strongly prefer adding software to EESSI that was built with a recent compiler toolchain.

    When adding software to a particular version of EESSI, you should use a toolchain version that is already installed.

    If you would like to see an additional toolchain version being added to a particular version of EESSI, please open a support request for this, and motivate your request.

    "},{"location":"adding_software/contribution_policy/#recent_software_versions","title":"vi) Recent software versions","text":"

    We strongly prefer adding sufficiently recent software versions to EESSI.

    If you would like to add older software versions, please clearly motivate the need for this in your contribution.

    "},{"location":"adding_software/contribution_policy/#cpu_targets","title":"vii) CPU targets","text":"

    Software that is added to EESSI should work on all supported CPU targets.

    Exceptions to this requirement are allowed if technical problems that can not be resolved with reasonable effort prevent the installation of the software for specific CPU targets.

    "},{"location":"adding_software/contribution_policy/#testing","title":"viii) Testing","text":"

    We should be able to test the software installations via the EESSI test suite, in particular for software applications and user-facing tools.

    Ideally one or more tests are available that verify that the software is functionally correct, and that it (still) performs well.

    Tests that are run during the software installation procedure as performed by EasyBuild must pass. Exceptions can be made if only a small subset of tests fail for specific CPU targets, as long as these exceptions are tracked and an effort is made to assess the impact of those failing tests.

    It should be possible to run a minimal smoke test for the software included in EESSI, for example using EasyBuild's --sanity-check-only feature.
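
As a sketch of what such a minimal smoke test could look like (the easyconfig name is just a placeholder):

eb SomeSoftware-1.0.eb --sanity-check-only\n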

    Note

    The EESSI test suite is still in active development, and currently only has a minimal set of tests available.

    When the test suite is more mature, this requirement will be enforced more strictly.

    "},{"location":"adding_software/contribution_policy/#changelog","title":"Changelog","text":""},{"location":"adding_software/contribution_policy/#v010-9-nov-2023","title":"v0.1.0 (9 Nov 2023)","text":"
    • initial contribution policy
    "},{"location":"adding_software/debugging_failed_builds/","title":"Debugging failed builds","text":"

    (for contributors + maintainers)

    Unfortunately, software does not always build successfully. Since EESSI targets novel CPU architectures as well, build failures on such platforms are quite common, as the software and/or the software build systems have not always been adjusted to support these architectures yet.

In EESSI, all software packages are built by a bot. This is great for builds that complete successfully, as we can build many software packages for a wide range of hardware with little human intervention. However, it does mean that you, as a contributor, cannot easily access the build directory and build logs to figure out build issues.

    This page describes how you can interactively reproduce failed builds, so that you can more easily debug the issue.

Throughout this page, we will use this PR as an example. It intends to add LAMMPS to EESSI. Among other issues, it failed on building Plumed.

    "},{"location":"adding_software/debugging_failed_builds/#prerequisites","title":"Prerequisites","text":"

    You will need to have:

    • Access to a machine with the hardware for which the build that you want to debug failed.
    • On that machine, meet the requirements for running the EESSI container, as described on this page.
    "},{"location":"adding_software/debugging_failed_builds/#preparing-the-environment","title":"Preparing the environment","text":"

    A number of steps are needed to create the same environment in which the bot builds.

    • Fetching the feature branch from which you want to replicate a build.
    • Starting a shell in the EESSI container.
    • Start the Gentoo Prefix environment.
    • Start the EESSI software environment.
    • Configure EasyBuild.
    "},{"location":"adding_software/debugging_failed_builds/#fetching-the-feature-branch","title":"Fetching the feature branch","text":"

    Looking at the example PR, we see the PR is created from this fork. First, we clone the fork, then checkout the feature branch (LAMMPS_23Jun2022)

    git clone https://github.com/laraPPr/software-layer/\ncd software-layer\ngit checkout LAMMPS_23Jun2022\n
    Alternatively, if you already have a clone of the software-layer you can add it as a new remote
    cd software-layer\ngit remote add laraPPr https://github.com/laraPPr/software-layer/\ngit fetch laraPPr\ngit checkout LAMMPS_23Jun2022\n

    "},{"location":"adding_software/debugging_failed_builds/#starting-a-shell-in-the-eessi-container","title":"Starting a shell in the EESSI container","text":"

    Simply run the EESSI container (eessi_container.sh), which should be in the root of the software-layer repository

    ./eessi_container.sh --access rw\n

If you want to install NVIDIA GPU software, make sure to also add the --nvidia all argument, to ensure that your GPU drivers get mounted inside the container:

    ./eessi_container.sh --access rw --nvidia all\n

    Note

    You may have to press enter to clearly see the prompt as some messages beginning with CernVM-FS: have been printed after the first prompt Apptainer> was shown.

    "},{"location":"adding_software/debugging_failed_builds/#more-efficient-approach-for-multiplecontinued-debugging-sessions","title":"More efficient approach for multiple/continued debugging sessions","text":"

    While the above works perfectly well, you might not be able to complete your debugging session in one go. With the above approach, several steps will just be repeated every time you start a debugging session:

    • Downloading the container
    • Installing CUDA in your host injections directory (only if you use the EESSI-install-software.sh script, see below)
    • Installing all dependencies (before you get to the package that actually fails to build)

To avoid this, we create two directories. One holds the container & host_injections, which are (typically) common between multiple PRs, so you don't have to redownload the container / reinstall the host_injections if you start working on another PR. The other will hold the PR-specific data: a tarball storing the software you'll build in your interactive debugging session. The paths we pick here are just examples; you can pick any persistent, writeable location for this:

    eessi_common_dir=${HOME}/eessi-manual-builds\neessi_pr_dir=${HOME}/pr360\n

    Now, we start the container

    SINGULARITY_CACHEDIR=${eessi_common_dir}/container_cache ./eessi_container.sh --access rw --nvidia all --host-injections ${eessi_common_dir}/host_injections --save ${eessi_pr_dir}\n

    Here, the SINGULARITY_CACHEDIR makes sure that if the container was already downloaded, and is present in the cache, it is not redownloaded. The host injections will just be picked up from ${eessi_common_dir}/host_injections (if those were already installed before). And finally, the --save makes sure that everything that you build in the container gets stored in a tarball as soon as you exit the container.

Note that the first exit command will only make you exit the Gentoo Prefix environment. Only the second will take you out of the container, and print where the tarball will be stored:

    [EESSI 2023.06] $ exit\nlogout\nLeaving Gentoo Prefix with exit status 1\nApptainer> exit\nexit\nSaved contents of tmp directory '/tmp/eessi-debug.VgLf1v9gf0' to tarball '${HOME}/pr360/EESSI-1698056784.tgz' (to resume session add '--resume ${HOME}/pr360/EESSI-1698056784.tgz')\n

Note that the tarballs can be quite sizeable, so make sure to pick a filesystem where you have a large enough quota.

    Next time you want to continue investigating this issue, you can start the container with --resume DIR/TGZ and continue where you left off, having all dependencies already built and available.

    SINGULARITY_CACHEDIR=${eessi_common_dir}/container_cache ./eessi_container.sh --access rw --nvidia all --host-injections ${eessi_common_dir}/host_injections --save ${eessi_pr_dir} --resume ${eessi_pr_dir}/EESSI-1698056784.tgz\n

    For a detailed description on using the script eessi_container.sh, see here.

    Note

    Reusing a previously downloaded container, or an existing CUDA installation from host_injections, is not a good approach if those could be the cause of your issues. If you are unsure whether this is the case, simply follow the regular approach to starting the EESSI container.

    Note

    It is recommended to clean the container cache and host_injections directories every now and again, to make sure you pick up the latest changes for those two components.
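
    A minimal sketch of such a cleanup, assuming you used the ${eessi_common_dir} layout from above (both directories will simply be repopulated on your next run):

    rm -rf ${eessi_common_dir}/container_cache ${eessi_common_dir}/host_injections\n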

    "},{"location":"adding_software/debugging_failed_builds/#start-the-gentoo-prefix-environment","title":"Start the Gentoo Prefix environment","text":"

    The next step is to start the Gentoo Prefix environment.

    Before we start, check the current values of ${EESSI_CVMFS_REPO} and ${EESSI_VERSION} so that you can reset them later:

    echo ${EESSI_CVMFS_REPO}\necho ${EESSI_VERSION}\n

    Then, we set EESSI_OS_TYPE and EESSI_CPU_FAMILY and run the startprefix command to start the Gentoo Prefix environment:

    export EESSI_OS_TYPE=linux  # We only support Linux for now\nexport EESSI_CPU_FAMILY=$(uname -m)\n${EESSI_CVMFS_REPO}/versions/${EESSI_VERSION}/compat/${EESSI_OS_TYPE}/${EESSI_CPU_FAMILY}/startprefix\n

    Now, reset the ${EESSI_CVMFS_REPO} and ${EESSI_VERSION} in your prefix environment with the initial values (printed in the echo statements above)

    export EESSI_CVMFS_REPO=...\nexport EESSI_VERSION=...\n

    Note

    By activating the Gentoo Prefix environment, the system tools (e.g. ls) you would normally use are now provided by Gentoo Prefix, instead of the container OS. E.g. running which ls after starting the prefix environment as above will return /cvmfs/software.eessi.io/versions/2023.06/compat/linux/x86_64/bin/ls. This makes the builds completely independent from the container OS.
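
    You can verify this yourself after starting the prefix environment; on an x86_64 system the output would look like:

    which ls\n# /cvmfs/software.eessi.io/versions/2023.06/compat/linux/x86_64/bin/ls\n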

    "},{"location":"adding_software/debugging_failed_builds/#building-for-the-generic-optimization-target","title":"Building for the generic optimization target","text":"

    If you want to replicate a build with generic optimization (i.e. in $EESSI_CVMFS_REPO/versions/${EESSI_VERSION}/software/${EESSI_OS_TYPE}/${EESSI_CPU_FAMILY}/generic) you will need to set the following environment variable:

    export EESSI_CPU_FAMILY=$(uname -m) && export EESSI_SOFTWARE_SUBDIR_OVERRIDE=${EESSI_CPU_FAMILY}/generic\n

    "},{"location":"adding_software/debugging_failed_builds/#building-software-with-the-eessi-install-softwaresh-script","title":"Building software with the EESSI-install-software.sh script","text":"

    The Automatic build and deploy bot installs software by executing the EESSI-install-software.sh script. The advantage is that running this script is the closest you can get to replicating the bot's behaviour - and thus the failure. The downside is that if a PR adds a lot of software, it may take quite a long time to run - even if you might already know what the problematic software package is. In that case, you might be better off following the steps under [Building software from an easystack file](#building-software-from-an-easystack-file) or [Building an individual package](#building-an-individual-package).

    Note that you could also combine approaches: first build everything using the EESSI-install-software.sh script, until you reproduce the failure. Then, start making modifications (e.g. changes to the EasyConfig, patches, etc) and try to rebuild that package individually to test your changes.

    To build software using the EESSI-install-software.sh script, you'll first need to get the diff file for the PR. This is used by the EESSI-install-software.sh script to see what is changed in this PR - and thus what needs to be built for this PR. To download the diff for PR 360, we would, for example, do

    wget https://github.com/EESSI/software-layer/pull/360.diff\n

    Now, we run the EESSI-install-software.sh script:

    ./EESSI-install-software.sh\n
    "},{"location":"adding_software/debugging_failed_builds/#building-software-from-an-easystack-file","title":"Building software from an easystack file","text":""},{"location":"adding_software/debugging_failed_builds/#starting-the-eessi-software-environment","title":"Starting the EESSI software environment","text":"

    To activate the software environment, run

    source ${EESSI_CVMFS_REPO}/versions/${EESSI_VERSION}/init/bash\n

    Note

    If you get an error bash: /versions//init/bash: No such file or directory, you forgot to reset the ${EESSI_CVMFS_REPO} and ${EESSI_VERSION} environment variables at the end of the previous step.

    Note

    If you want to build with generic optimization, you should run export EESSI_CPU_FAMILY=$(uname -m) && export EESSI_SOFTWARE_SUBDIR_OVERRIDE=${EESSI_CPU_FAMILY}/generic before sourcing.

    For more info on starting the EESSI software environment, see here.

    "},{"location":"adding_software/debugging_failed_builds/#configure-easybuild","title":"Configure EasyBuild","text":"

    It is important that we configure EasyBuild in the same way as the bot uses it, with one small exception: our working directory will be different. Typically, that doesn't matter, but it's good to be aware of this one difference, in case you fail to replicate the build failure.

    In this example, we create a unique temporary directory inside /tmp to serve as our workdir. Then, we source the configure_easybuild script, which configures EasyBuild by setting environment variables.

    export WORKDIR=$(mktemp --directory --tmpdir=/tmp  -t eessi-debug.XXXXXXXXXX)\nsource configure_easybuild\n
    Among other things, the configure_easybuild script sets the install path for EasyBuild to the correct installation directory (${EESSI_CVMFS_REPO}/versions/${EESSI_VERSION}/software/${EESSI_OS_TYPE}/${EESSI_SOFTWARE_SUBDIR}). This is the exact same path the bot uses to build, relying on a writeable overlay filesystem in the container to write to a path in /cvmfs (which is normally read-only).

    Note

    If you started the container using --resume, you may want WORKDIR to point to the workdir you created previously (instead of creating a new, temporary directory with mktemp).
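
    For example (a sketch; the directory name shown is hypothetical, use the actual workdir created by mktemp in your earlier session):

    export WORKDIR=/tmp/eessi-debug.XXXXXXXXXX  # hypothetical path, replace with your previous workdir\nsource configure_easybuild\n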

    Note

    If you want to replicate a build with generic optimization (i.e. in $EESSI_CVMFS_REPO/versions/${EESSI_VERSION}/software/${EESSI_OS_TYPE}/${EESSI_CPU_FAMILY}/generic) you will need to set export EASYBUILD_OPTARCH=GENERIC after sourcing configure_easybuild.

    Next, we need to determine the correct version of EasyBuild to load. Since the example PR changes the file eessi-2023.06-eb-4.8.1-2021b.yml, this tells us the bot was using version 4.8.1 of EasyBuild to build this. Thus, we load that version of the EasyBuild module and check if everything was configured correctly:

    module load EasyBuild/4.8.1\neb --show-config\n
    You should get something similar to

    #\n# Current EasyBuild configuration\n# (C: command line argument, D: default value, E: environment variable, F: configuration file)\n#\nbuildpath            (E) = /tmp/easybuild/easybuild/build\ncontainerpath        (E) = /tmp/easybuild/easybuild/containers\ndebug                (E) = True\nexperimental         (E) = True\nfilter-deps          (E) = Autoconf, Automake, Autotools, binutils, bzip2, DBus, flex, gettext, gperf, help2man, intltool, libreadline, libtool, Lua, M4, makeinfo, ncurses, util-linux, XZ, zlib, Yasm\nfilter-env-vars      (E) = LD_LIBRARY_PATH\nhooks                (E) = ${HOME}/software-layer/eb_hooks.py\nignore-osdeps        (E) = True\ninstallpath          (E) = /tmp/easybuild/software/linux/aarch64/neoverse_n1\nmodule-extensions    (E) = True\npackagepath          (E) = /tmp/easybuild/easybuild/packages\nprefix               (E) = /tmp/easybuild/easybuild\nread-only-installdir (E) = True\nrepositorypath       (E) = /tmp/easybuild/easybuild/ebfiles_repo\nrobot-paths          (D) = /cvmfs/software.eessi.io/versions/2023.06/software/linux/aarch64/neoverse_n1/software/EasyBuild/4.8.1/easybuild/easyconfigs\nrpath                (E) = True\nsourcepath           (E) = /tmp/easybuild/easybuild/sources:\nsysroot              (E) = /cvmfs/software.eessi.io/versions/2023.06/compat/linux/aarch64\ntrace                (E) = True\nzip-logs             (E) = bzip2\n
    "},{"location":"adding_software/debugging_failed_builds/#building-everything-in-the-easystack-file","title":"Building everything in the easystack file","text":"

    In our example PR, the easystack file that was changed was eessi-2023.06-eb-4.8.1-2021b.yml. To build this, we run (in the directory that contains the checkout of this feature branch):

    eb --easystack eessi-2023.06-eb-4.8.1-2021b.yml --robot\n
    After some time, this build fails while trying to build Plumed, and we can access the build log to look for clues on why it failed.

    "},{"location":"adding_software/debugging_failed_builds/#building-an-individual-package","title":"Building an individual package","text":"

    First, prepare the environment by following the [Starting the EESSI software environment](#starting-the-eessi-software-environment) and [Configure EasyBuild](#configure-easybuild) steps above.

    In our example PR, the individual package that was added to eessi-2023.06-eb-4.8.1-2021b.yml was LAMMPS-23Jun2022-foss-2021b-kokkos.eb. To mimic the build behaviour, we'll also have to (re)use any options that are listed in the easystack file for LAMMPS-23Jun2022-foss-2021b-kokkos.eb, in this case the option --from-pr 19000. Thus, to build, we run:

    eb LAMMPS-23Jun2022-foss-2021b-kokkos.eb --robot --from-pr 19000\n
    After some time, this build fails while trying to build Plumed, and we can access the build log to look for clues on why it failed.

    Note

    While this might be faster than the easystack-based approach, this is not how the bot builds. So while it may reproduce the failure the bot encounters, it may also not reproduce the bug at all (no failure), or run into different bugs. If you want to be sure, use the easystack-based approach.

    "},{"location":"adding_software/debugging_failed_builds/#known-causes-of-issues-in-eessi","title":"Known causes of issues in EESSI","text":""},{"location":"adding_software/debugging_failed_builds/#the-custom-system-prefix-of-the-compatibility-layer","title":"The custom system prefix of the compatibility layer","text":"

    Some installations might expect the system root (sysroot, for short) to be in /. However, in case of EESSI, we are building against the OS in the compatibility layer. Thus, our sysroot is something like ${EESSI_CVMFS_REPO}/versions/${EESSI_VERSION}/compat/${EESSI_OS_TYPE}/${EESSI_CPU_FAMILY}. This can cause issues if installation procedures assume the sysroot is in /.

    One example of a sysroot issue was in installing wget. The EasyConfig for wget defined

    # make sure pkg-config picks up system packages (OpenSSL & co)\npreconfigopts = \"export PKG_CONFIG_PATH=/usr/lib64/pkgconfig:/usr/lib/pkgconfig:/usr/lib/x86_64-linux-gnu/pkgconfig && \"\nconfigopts = '--with-ssl=openssl '\n
    This will not work in EESSI, since OpenSSL should be picked up from the compatibility layer. This was fixed by changing the EasyConfig to read:
    preconfigopts = \"export PKG_CONFIG_PATH=%(sysroot)s/usr/lib64/pkgconfig:%(sysroot)s/usr/lib/pkgconfig:%(sysroot)s/usr/lib/x86_64-linux-gnu/pkgconfig && \"\nconfigopts = '--with-ssl=openssl '\n
    The %(sysroot)s is a template value which EasyBuild will resolve to the value that has been configured in EasyBuild for sysroot (it is one of the fields printed by eb --show-config if a non-standard sysroot is configured).

    If you encounter issues where the installation cannot find something that is normally provided by the OS (i.e. not one of the dependencies in your module environment), you may need to resort to a similar approach.
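
    To check which sysroot is configured in your session, you can for example filter the output of the EasyBuild configuration shown earlier:

    eb --show-config | grep sysroot\n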

    "},{"location":"adding_software/debugging_failed_builds/#the-writeable-overlay","title":"The writeable overlay","text":"

    The writeable overlay in the container is known to be a bit slow sometimes. Thus, we have seen tests failing because they exceed some timeout (e.g. this issue).

    To investigate if the writeable overlay is somehow the issue, you can make sure the installation gets done somewhere else, e.g. in the temporary directory in /tmp that you created as workdir. To do this, set

    export EASYBUILD_INSTALLPATH=${WORKDIR}\n

    after the step in which you have sourced the configure_easybuild script. Note that in order to find (with module av) any modules that get installed here, you will need to add this path to the MODULEPATH:

    module use ${EASYBUILD_INSTALLPATH}/modules/all\n

    Then, retry building the software (as described above). If the build now succeeds, you know that the writeable overlay indeed caused the issue. Since we have to build in this writeable overlay when we do real deployments, if you hit such a timeout, see if you can (temporarily) modify the timeout value in the test so that it passes.
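
    Putting this together, a retry of the easystack build with the installation redirected to your workdir could look like this (reusing the commands shown above):

    export EASYBUILD_INSTALLPATH=${WORKDIR}\nmodule use ${EASYBUILD_INSTALLPATH}/modules/all\neb --easystack eessi-2023.06-eb-4.8.1-2021b.yml --robot\n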

    "},{"location":"adding_software/deploying_software/","title":"Deploying software","text":"

    (for maintainers)

    "},{"location":"adding_software/deploying_software/#instructing-the-bot-to-deploy","title":"Instructing the bot to deploy","text":"

    To make the bot deploy the successfully built software, you should issue the corresponding instruction to the bot.

    For more information, see the deploying section in the bot documentation.

    Warning

    Permission to trigger deployment of software installations must be granted to your GitHub account first!

    See bot permissions for more information.

    "},{"location":"adding_software/deploying_software/#merging-the-pull-request","title":"Merging the pull request","text":"

    You should be able to verify in the pull request whether the ingestion has been done: the CI should initially fail, to indicate that some software installations listed in your modified easystack are still missing.

    Once the ingestion has been done, simply re-triggering the CI workflow should be sufficient to make it pass, and then the pull request can be merged.

    Note

    This assumes that the easystack file being modified is considered by the CI workflow file (.github/workflows/test_eessi.yml) that checks for missing installations, in the correct branch (for example 2023.06) of the software-layer.

    If that's not the case yet, update this workflow in your pull request as well to add the missing easystack file!
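
    To find where easystack files are listed in that workflow (so you know where to add yours), you could for example search for an existing entry; this assumes the workflow references easystack file names directly:

    grep -n 'eessi-2023.06-eb' .github/workflows/test_eessi.yml\n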

    Warning

    You need permissions to re-trigger CI workflows and merge pull requests in the software-layer repository.

    Ask for help in the #software-layer channel of the EESSI Slack if needed!

    "},{"location":"adding_software/deploying_software/#getting-help","title":"Getting help","text":"

    If you have any questions, or if you need help with something, don't hesitate to contact us via the #software-layer channel of the EESSI Slack.

    "},{"location":"adding_software/opening_pr/","title":"Opening a pull request","text":"

    (for contributors)

    To add software to EESSI, you should go through the semi-automatic software installation procedure by:

    • 1) Making a pull request to the software-layer repository to (add or) update an easystack file that is used by EasyBuild to install software;
    • 2) Instructing the bot to build the software on all supported CPU microarchitectures;
    • 3) Instructing the bot to deploy the built software for ingestion into the EESSI repository;
    • 4) Merging the pull request once CI indicates that the software has been ingested.

    Warning

    Make sure you are also aware of our contribution policy when adding software to EESSI.

    "},{"location":"adding_software/opening_pr/#preparation","title":"Preparation","text":"

    Before you can make a pull request to the software-layer, you should fork the repository in your GitHub account.

    For the remainder of these instructions, we assume that your GitHub account is @koala.

    Note

    Don't forget to replace koala with the name of your GitHub account in the commands below!

    1) Clone the EESSI/software-layer repository:

    mkdir EESSI\ncd EESSI\ngit clone https://github.com/EESSI/software-layer\ncd software-layer\n

    2) Add your fork as a remote

    git remote add koala git@github.com:koala/software-layer.git\n

    3) Check out the branch that corresponds to the version of EESSI repository you want to add software to, for example 2023.06-software.eessi.io:

    git checkout 2023.06-software.eessi.io\n

    Note

    The commands above only need to be run once, to prepare your setup for making pull requests.

    "},{"location":"adding_software/opening_pr/#software_layer_pull_request","title":"Creating a pull request","text":"

    1) Make sure that your 2023.06-software.eessi.io branch in the checkout of the EESSI/software-layer repository is up-to-date

    cd EESSI/software-layer\ngit checkout 2023.06-software.eessi.io \ngit pull origin 2023.06-software.eessi.io \n

    2) Create a new branch (use a sensible name, not example_branch as below), and check it out

    git checkout -b example_branch\n

    3) Determine the correct easystack file to change, and add one or more lines to it that specify which easyconfigs should be installed

    echo '  - example-1.2.3-GCC-12.3.0.eb' >> easystacks/software.eessi.io/2023.06/eessi-2023.06-eb-4.8.2-2023a.yml\n

    4) Stage and commit the changes into your branch with a sensible message

    git add easystacks/software.eessi.io/2023.06/eessi-2023.06-eb-4.8.2-2023a.yml\ngit commit -m \"{2023.06}[GCC/12.3.0] example 1.2.3\"\n

    5) Push your branch to your fork of the software-layer repository

    git push koala example_branch\n

    6) Go to the GitHub web interface to open your pull request, or use the helpful link that should show up in the output of the git push command.

    Make sure you target the correct branch: the one that corresponds to the version of EESSI you want to add software to (like 2023.06-software.eessi.io).

    If all goes well, one or more bots should almost instantly create a comment in your pull request with an overview of how it is configured - you will need this information when providing build instructions.

    "},{"location":"adding_software/overview/","title":"Overview of adding software to EESSI","text":"

    We welcome contributions to the EESSI software stack. This page shows the procedure and provides links to the contribution policy and the technical details of making a contribution.

    "},{"location":"adding_software/overview/#contribute-a-software-to-the-eessi-software-stack","title":"Contribute a software to the EESSI software stack","text":"
    \n%%{init: { 'theme':'forest', 'sequence': {'useMaxWidth':false} } }%%\nflowchart TB\n    I(contributor)  \n    K(reviewer)\n    A(Is there an EasyConfig for software) -->|No|B(Create an EasyConfig and contribute it to EasyBuild)\n    A --> |Yes|D(Create a PR to software-layer)\n    B --> C(Evaluate and merge pull request)\n    C --> D\n    D --> E(Review PR & trigger builds)\n    E --> F(Debug build issue if needed)\n    F --> G(Deploy tarballs to S3 bucket)\n    G --> H(Ingest tarballs in EESSI by merging staging PRs)\n     classDef blue fill:#9abcff,stroke:#333,stroke-width:2px;\n     class A,B,D,F,I blue\n     click B \"https://easybuild.io/\"\n     click D \"../opening_pr/\"\n     click F \"../debugging_failed_builds/\"\n
    "},{"location":"adding_software/overview/#contributing-a-reframe-test-to-the-eessi-test-suite","title":"Contributing a ReFrame test to the EESSI test suite","text":"

    Ideally, a contributor prepares a ReFrame test for the software to be added to the EESSI software stack.

    \n%%{init: { 'theme':'forest', 'sequence': {'useMaxWidth':false} } }%%\nflowchart TB\n\n    Z(Create ReFrame test & PR to tests-suite) --> Y(Review PR & run new test)\n    Y --> W(Debug issue if needed) \n    W --> V(Review PR if needed)\n    V --> U(Merge PR)\n     classDef blue fill:#9abcff,stroke:#333,stroke-width:2px;\n     class Z,W blue\n
    "},{"location":"adding_software/overview/#more-about-adding-software-to-eessi","title":"More about adding software to EESSI","text":"
    • Contribution policy
    • Opening a pull request (for contributors)
    • Building software (for maintainers)
    • Debugging failed builds (for contributors + maintainers)
    • Deploying software (for maintainers)

    If you need help with adding software to EESSI, please open a support request.

    "},{"location":"blog/","title":"Blog","text":""},{"location":"blog/2024/05/17/isc24/","title":"EESSI promo tour @ ISC'24 (May 2024, Hamburg)","text":"

    This week, we had the privilege of attending the ISC'24 conference in the beautiful city of Hamburg, Germany. This was an excellent opportunity for us to showcase EESSI, and gain valuable insights and feedback from the HPC community.

    "},{"location":"blog/2024/05/17/isc24/#bof-session-on-eessi","title":"BoF session on EESSI","text":"

    The EESSI Birds-of-a-Feather (BoF) session on Tuesday morning, part of the official ISC'24 program, was the highlight of our activities in Hamburg.

    It was well attended, with well over 100 people joining us at 9am.

    During this session, we introduced the EESSI project with a short presentation, followed by a well-received live hands-on demo of installing and using EESSI by spinning up an \"empty\" Linux virtual machine instance in Amazon EC2 and getting optimized installations of popular scientific applications like GROMACS and TensorFlow running in a matter of minutes.

    During the second part of the BoF session, we engaged with the audience through an interactive poll and by letting attendees ask questions.

    The presentation slides, including the results of the interactive poll and questions that were raised by attendees, are available here.

    "},{"location":"blog/2024/05/17/isc24/#workshops","title":"Workshops","text":"

    During the last day of ISC'24, EESSI was present in no less than three different workshops.

    "},{"location":"blog/2024/05/17/isc24/#risc-v-workshop","title":"RISC-V workshop","text":"

    At the Fourth International workshop on RISC-V for HPC, Juli\u00e1n Morillo (BSC) presented our paper \"Preparing to Hit the Ground Running: Adding RISC-V support to EESSI\" (slides available here).

    Juli\u00e1n covered the initial work that was done in the scope of the MultiXscale EuroHPC Centre-of-Excellence to add support for RISC-V to EESSI, outlined the challenges we encountered, and shared the lessons we have learned along the way.

    "},{"location":"blog/2024/05/17/isc24/#ahug-workshop","title":"AHUG workshop","text":"

    During the Arm HPC User Group (AHUG) workshop, Kenneth Hoste (HPC-UGent) gave a talk entitled \"Extending Arm\u2019s Reach by Going EESSI\" (slides available here).

    Next to a high-level introduction to EESSI, we briefly covered some of the challenges we encountered when testing the optimized software installations that we had built for the Arm Neoverse V1 microarchitecture, including bugs in OpenMPI and GROMACS.

    Kenneth gave a live demonstration of how to get access to EESSI and start running the optimized software installations we provide through our CernVM-FS repository on a fresh AWS Graviton 3 instance in a matter of minutes.

    "},{"location":"blog/2024/05/17/isc24/#pop-workshop","title":"POP workshop","text":"

    In the afternoon on Thursday, Lara Peeters (HPC-UGent) presented MultiXscale during the Readiness of HPC Extreme-scale Applications workshop, which was organised by the POP EuroHPC Centre-of-Excellence (slides available here).

    Lara outlined the pilot use cases on which MultiXscale focuses, and explained how EESSI helps to achieve the goals of MultiXscale in terms of Productivity, Performance, and Portability.

    At the end of the workshop, a group picture was taken with both organisers and speakers, which was a great way to wrap up a busy week in Hamburg!

    "},{"location":"blog/2024/05/17/isc24/#talks-and-demos-on-eessi-at-exhibit","title":"Talks and demos on EESSI at exhibit","text":"

    Not only was EESSI part of the official ISC'24 program via a dedicated BoF session and various workshops: we were also prominently present on the exhibit floor.

    "},{"location":"blog/2024/05/17/isc24/#microsoft-azure-booth","title":"Microsoft Azure booth","text":"

    Microsoft Azure invited us to give a 1-hour introductory presentation on EESSI on both Monday and Wednesday at their booth during the ISC'24 exhibit, as well as to provide live demonstrations at the demo corner of their booth on Tuesday afternoon on how to get access to EESSI and the user experience it provides.

    Exhibit attendees were welcome to pass by and ask questions, and did so throughout the full 4 hours we were present there.

    Both Microsoft Azure and AWS have been graciously providing resources in their cloud infrastructure free-of-cost for developing, testing, and demonstrating EESSI for several years now.

    "},{"location":"blog/2024/05/17/isc24/#eurohpc-booth","title":"EuroHPC booth","text":"

    The MultiXscale EuroHPC Centre-of-Excellence we are actively involved in, and through which the development of EESSI has been co-funded since Jan'23, was invited by the EuroHPC JU to present the goals and preliminary achievements at their booth.

    Elisabeth Ortega (HPCNow!) did the honours to give the last talk at the EuroHPC JU booth of the ISC'24 exhibit.

    "},{"location":"blog/2024/05/17/isc24/#stickers","title":"Stickers!","text":"

    Last but not least: we handed out a boatload of free stickers with the logos of both MultiXscale and EESSI itself, as well as of various open source software projects we leverage, including EasyBuild, Lmod, and CernVM-FS.

    We have mostly exhausted our sticker collection during ISC'24, but don't worry: we will make sure we have more available at upcoming events...

    "},{"location":"filesystem_layer/stratum1/","title":"Setting up a Stratum 1","text":"

    Setting up a Stratum 1 involves the following steps:

    • set up the Stratum 1, preferably by running the Ansible playbook that we provide;
    • request a Stratum 0 firewall exception for your Stratum 1 server;
    • request a <your site>.stratum1.cvmfs.eessi-infra.org DNS entry;
    • open a pull request to include the URL to your Stratum 1 in the EESSI configuration.

    The last two steps can be skipped if you want to host a \"private\" Stratum 1 for your site.

    "},{"location":"filesystem_layer/stratum1/#requirements-for-a-stratum-1","title":"Requirements for a Stratum 1","text":"

    The main requirements for a Stratum 1 server are a good network connection to the clients it is going to serve, and sufficient disk space. For the EESSI repository, a few hundred gigabytes should suffice, but for production environments at least 1 TB would be recommended.

    In terms of cores and memory, a machine with just a few (~4) cores and 4-8 GB of memory should suffice.

    Various Linux distributions are supported, but we recommend one based on RHEL 7 or 8.

    Finally, make sure that ports 80 (for the Apache web server) and 8000 are open.

    "},{"location":"filesystem_layer/stratum1/#step-1-set-up-the-stratum-1","title":"Step 1: set up the Stratum 1","text":"

    The recommended way for setting up an EESSI Stratum 1 is by running the Ansible playbook stratum1.yml from the filesystem-layer repository on GitHub.

    Installing a Stratum 1 requires a GEO API license key, which will be used to find the (geographically) closest Stratum 1 server for your client and proxies. More information on how to (freely) obtain this key is available in the CVMFS documentation: https://cvmfs.readthedocs.io/en/stable/cpt-replica.html#geo-api-setup.

    You can put your license key in the local configuration file inventory/local_site_specific_vars.yml.

    Furthermore, the Stratum 1 runs a Squid server. The template configuration file can be found at templates/eessi_stratum1_squid.conf.j2. If you want to customize it, for instance to limit access to the Stratum 1, you can make your own version of this template file and point to it by setting local_stratum1_cvmfs_squid_conf_src in inventory/local_site_specific_vars.yml. See the comments in the example file for more details.
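
    A minimal sketch of such an override, assuming you stored your customized copy as templates/my_eessi_stratum1_squid.conf.j2 (the file name is just an example):

    echo 'local_stratum1_cvmfs_squid_conf_src: templates/my_eessi_stratum1_squid.conf.j2' >> inventory/local_site_specific_vars.yml\n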

    Start by installing Ansible:

    sudo yum install -y ansible\n

    Then install Ansible roles for EESSI:

    ansible-galaxy role install -r requirements.yml -p ./roles --force\n

    Make sure you have enough space in /srv (on the Stratum 1), since the snapshot of the Stratum 0 will end up there by default. To alter the directory where the snapshot gets copied to, you can add this variable in inventory/host_vars/<url-or-ip-to-your-stratum1>:

    cvmfs_srv_mount: /srv\n

    Make sure that you have added the hostname or IP address of your server to the inventory/hosts file. Finally, install the Stratum 1 using one of the two following options.

    Option 1:

    # -b to run as root, optionally use -K if a sudo password is required\nansible-playbook -b [-K] -e @inventory/local_site_specific_vars.yml stratum1.yml\n

    Option 2:

    Create an SSH key pair and make sure the ansible-host-keys.pub is in the $HOME/.ssh/authorized_keys file on your Stratum 1 server.

    ssh-keygen -b 2048 -t rsa -f ~/.ssh/ansible-host-keys -q -N \"\"\n

    Then run the playbook:

    ansible-playbook -b --private-key ~/.ssh/ansible-host-keys -e @inventory/local_site_specific_vars.yml stratum1.yml\n

    Running the playbook will automatically make replicas of all the repositories defined in group_vars/all.yml.

    "},{"location":"filesystem_layer/stratum1/#step-2-request-a-firewall-exception","title":"Step 2: request a firewall exception","text":"

    (This step is not implemented yet and can be skipped)

    You can request a firewall exception rule to be added for your Stratum 1 server by opening an issue on the GitHub page of the filesystem layer repository.

    Make sure to include the IP address of your server.

    "},{"location":"filesystem_layer/stratum1/#step-3-verification-of-the-stratum-1","title":"Step 3: Verification of the Stratum 1","text":"

    When the playbook has finished, your Stratum 1 should be ready. In order to test your Stratum 1, even without a client installed, you can use curl.

    curl --head http://<url-or-ip-to-your-stratum1>/cvmfs/software.eessi.io/.cvmfspublished\n
    This should return:

    HTTP/1.1 200 OK\n...\nX-Cache: MISS from <url-or-ip-to-your-stratum1>\n

    The second time you run it, you should get a cache hit:

    X-Cache: HIT from <url-or-ip-to-your-stratum1>\n

    Example with the Norwegian Stratum 1:

    curl --head http://bgo-no.stratum1.cvmfs.eessi-infra.org/cvmfs/software.eessi.io/.cvmfspublished\n

    You can also test access to your Stratum 1 from a client, for which you will have to install the CVMFS client.

    Then run the following command to add your newly created Stratum 1 to the existing list of EESSI Stratum 1 servers by creating a local CVMFS configuration file:

    echo 'CVMFS_SERVER_URL=\"http://<url-or-ip-to-your-stratum1>/cvmfs/@fqrn@;$CVMFS_SERVER_URL\"' | sudo tee -a /etc/cvmfs/domain.d/eessi-hpc.org.local\n

    If this is the first time you set up the client, you now run:

    sudo cvmfs_config setup\n

    If you already had configured the client before, you can simply reload the config:

    sudo cvmfs_config reload -c software.eessi.io\n

    Finally, verify that the client connects to your new Stratum 1 by running:

    cvmfs_config stat -v software.eessi.io\n

    Assuming that your new Stratum 1 is the geographically closest one to your client, this should return:

    Connection: http://<url-or-ip-to-your-stratum1>/cvmfs/software.eessi.io through proxy DIRECT (online)\n
    "},{"location":"filesystem_layer/stratum1/#step-4-request-an-eessi-dns-name","title":"Step 4: request an EESSI DNS name","text":"

    In order to keep the configuration clean and easy, all the EESSI Stratum 1 servers have a DNS name <your site>.stratum1.cvmfs.eessi-infra.org, where <your site> is often a short name or abbreviation followed by the country code (e.g. rug-nl or bgo-no). You can request this for your Stratum 1 by mentioning this in the issue that you created in Step 2, or by opening another issue.

    "},{"location":"filesystem_layer/stratum1/#step-5-include-your-stratum-1-in-the-eessi-configuration","title":"Step 5: include your Stratum 1 in the EESSI configuration","text":"

    If you want to include your Stratum 1 in the EESSI configuration, i.e. allow any (nearby) client to be able to use it, you can open a pull request with updated configuration files. You will only have to add the URL to your Stratum 1 to the urls list of the eessi_cvmfs_server_urls variable in the all.yml file.

    "},{"location":"getting_access/eessi_container/","title":"EESSI container script","text":"

    The eessi_container.sh script provides a very easy yet versatile means to access EESSI. It is the preferred method to start an EESSI container as it has support for many different scenarios via various options.

    This page guides you through several example scenarios illustrating the use of the script.

    "},{"location":"getting_access/eessi_container/#prerequisites","title":"Prerequisites","text":"
    • Apptainer 1.0.0 (or newer), or Singularity 3.7.x
      • Check with apptainer --version or singularity --version
      • Support for the --fusemount option in the shell and run subcommands is required
    • Git
      • Check with git --version
    "},{"location":"getting_access/eessi_container/#preparation","title":"Preparation","text":"

    Clone the EESSI/software-layer repository and change into the software-layer directory by running these commands:

    git clone https://github.com/EESSI/software-layer.git\ncd software-layer\n
    "},{"location":"getting_access/eessi_container/#quickstart","title":"Quickstart","text":"

    Run the eessi_container script (from the software-layer directory) to start a shell session in the EESSI container:

    ./eessi_container.sh\n

    Note

    Startup will take a bit longer the first time you run this because the container image is downloaded and converted.

    You should see output like

    Using /tmp/eessi.abc123defg as tmp storage (add '--resume /tmp/eessi.abc123defg' to resume where this session ended).\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell  --fusemount container:cvmfs2 cvmfs-config.cern.ch /cvmfs/cvmfs-config.cern.ch --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.ymYGaZwoWC/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nApptainer> CernVM-FS: loading Fuse module... done\nCernVM-FS: loading Fuse module... done\n\nApptainer>\n

    Note

    You may have to press enter to clearly see the prompt, as some messages beginning with CernVM-FS: may have been printed after the first Apptainer> prompt was shown.

    To start using EESSI, see Using EESSI/Setting up your environment.

    "},{"location":"getting_access/eessi_container/#help-for-eessi_containersh","title":"Help for eessi_container.sh","text":"

    The example in the Quickstart section facilitates an interactive session with read access to the EESSI software stack. It does not require any command line options, because the script eessi_container.sh uses some carefully chosen defaults. To view all options of the script and its default values, run the command

    ./eessi_container.sh --help\n
    You should see the following output
    usage: ./eessi_container.sh [OPTIONS] [[--] SCRIPT or COMMAND]\n OPTIONS:\n  -a | --access {ro,rw}  - ro (read-only), rw (read & write) [default: ro]\n  -c | --container IMG   - image file or URL defining the container to use\n                           [default: docker://ghcr.io/eessi/build-node:debian11]\n  -g | --storage DIR     - directory space on host machine (used for\n                           temporary data) [default: 1. TMPDIR, 2. /tmp]\n  -h | --help            - display this usage information [default: false]\n  -i | --host-injections - directory to link to for host_injections \n                           [default: /..storage../opt-eessi]\n  -l | --list-repos      - list available repository identifiers [default: false]\n  -m | --mode MODE       - with MODE==shell (launch interactive shell) or\n                           MODE==run (run a script or command) [default: shell]\n  -n | --nvidia MODE     - configure the container to work with NVIDIA GPUs,\n                           MODE==install for a CUDA installation, MODE==run to\n                           attach a GPU, MODE==all for both [default: false]\n  -r | --repository CFG  - configuration file or identifier defining the\n                           repository to use [default: EESSI via\n                           container configuration]\n  -u | --resume DIR/TGZ  - resume a previous run from a directory or tarball,\n                           where DIR points to a previously used tmp directory\n                           (check for output 'Using DIR as tmp ...' of a previous\n                           run) and TGZ is the path to a tarball which is\n                           unpacked the tmp dir stored on the local storage space\n                           (see option --storage above) [default: not set]\n  -s | --save DIR/TGZ    - save contents of tmp directory to a tarball in\n                           directory DIR or provided with the fixed full path TGZ\n                           when a directory is provided, the format of the\n                           tarball's name will be {REPO_ID}-{TIMESTAMP}.tgz\n                           [default: not set]\n  -v | --verbose         - display more information [default: false]\n  -x | --http-proxy URL  - provides URL for the env variable http_proxy\n                           [default: not set]; uses env var $http_proxy if set\n  -y | --https-proxy URL - provides URL for the env variable https_proxy\n                           [default: not set]; uses env var $https_proxy if set\n\n If value for --mode is 'run', the SCRIPT/COMMAND provided is executed. If\n arguments to the script/command start with '-' or '--', use the flag terminator\n '--' to let eessi_container.sh stop parsing arguments.\n

    So, the defaults are equal to running the command

    ./eessi_container.sh --access ro --container docker://ghcr.io/eessi/build-node:debian11 --mode shell --repository EESSI\n
    and it would either create a temporary directory under ${TMPDIR} (if defined), or /tmp (if ${TMPDIR} is not defined).

    The remainder of this page will demonstrate different scenarios using some of the command line options used for read-only access.

    Other options supported by the script will be discussed in a yet-to-be written section covering building software to be added to the EESSI stack.

    "},{"location":"getting_access/eessi_container/#resuming-a-previous-session","title":"Resuming a previous session","text":"

    You may have noted the following line in the output of eessi_container.sh

    Using /tmp/eessi.abc123defg as tmp storage (add '--resume /tmp/eessi.abc123defg' to resume where this session ended).\n

    Note

    The parameter after --resume (/tmp/eessi.abc123defg) will be different when you run eessi_container.sh.

    Scroll back in your terminal and copy it so you can pass it to --resume.

    Try the following command to \"resume\" from the last session.

    ./eessi_container.sh --resume /tmp/eessi.abc123defg\n
    This should run much faster because the container image has been cached in the temporary directory (/tmp/eessi.abc123defg). You should get to the prompt (Apptainer> or Singularity>) and can use EESSI with the state where you left the previous session.

    Note

    The state refers to what was stored on disk, not what was changed in memory. Particularly, any environment (variable) settings are not restored automatically.

    Because the /tmp/eessi.abc123defg directory contains a home directory which includes the saved history of your last session, you can easily restore the environment (variable) settings. Type history to see which commands you ran. You should be able to access the history as you would do in a normal terminal session.

    "},{"location":"getting_access/eessi_container/#running-a-simple-command","title":"Running a simple command","text":"

    Let's \"ls /cvmfs/software.eessi.io\" through the eessi_container.sh script to check if the CernVM-FS EESSI repository is accessible:

    ./eessi_container.sh --mode run ls /cvmfs/software.eessi.io\n

    You should see an output such as

    Using /tmp/eessi.abc123defg as tmp storage (add '--resume /tmp/eessi.abc123defg' to resume where this session ended).$\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell  --fusemount container:cvmfs2 cvmfs-config.cern.ch /cvmfs/cvmfs-config.cern.ch --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.ymYGaZwoWC/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nhost_injections  latest  versions\n

    Note that this time no interactive shell session is started in the container: only the provided command is run in the container, and when that finishes you are back in the shell session where you ran the eessi_container.sh script.

    This is because we used the --mode run command line option.

    Note

    The last line in the output is the output of the ls command, which shows the contents of the /cvmfs/software.eessi.io directory.

    Also, note that there is no shell prompt (Apptainer> or Singularity>), since no interactive shell session is started in the container.

    As an alternative to specifying the command as we did above, you can also do the following.

    CMD=\"ls -l /cvmfs/software.eessi.io\"\n./eessi_container.sh --mode shell <<< ${CMD}\n

    Note

    We changed the mode from run to shell because we use a different method to let the script run our command, by feeding it in via the stdin input channel using <<<.

    Because shell is the default value for --mode we can also omit this and simply run

    CMD=\"ls -l /cvmfs/software.eessi.io\"\n./eessi_container.sh <<< ${CMD}\n

    "},{"location":"getting_access/eessi_container/#running-a-script","title":"Running a script","text":"

    While running a simple command can be sufficient in some cases, you often want to run scripts containing multiple commands.

    Let's run the script shown below.

    First, copy-paste the contents for the script shown below, and create a file named eessi_architectures.sh in your current directory. Also make the script executable, by running:

    chmod +x eessi_architectures.sh\n

    Here are the contents for the eessi_architectures.sh script:

    #!/usr/bin/env bash\n#\n# This script determines which architectures are included in the\n# latest EESSI version. It makes use of the specific directory\n# structure in the EESSI repository.\n#\n\n# determine list of available OS types\nBASE=${EESSI_CVMFS_REPO:-/cvmfs/software.eessi.io}/latest/software\ncd ${BASE}\nfor os_type in $(ls -d *)\ndo\n    # determine architecture families\n    OS_BASE=${BASE}/${os_type}\n    cd ${OS_BASE}\n    for arch_family in $(ls -d *)\n    do\n        # determine CPU microarchitectures\n        OS_ARCH_BASE=${BASE}/${os_type}/${arch_family}\n        cd ${OS_ARCH_BASE}\n        for microarch in $(ls -d *)\n        do\n            case ${microarch} in\n                amd | intel )\n                    for sub in $(ls ${microarch})\n                    do\n                        echo \"${os_type}/${arch_family}/${microarch}/${sub}\"\n                    done\n                    ;;\n                * )\n                    echo \"${os_type}/${arch_family}/${microarch}\"\n                    ;;\n            esac\n        done\n    done\ndone\n
    Run the script as follows
    ./eessi_container.sh --mode shell < eessi_architectures.sh\n
    The output should be similar to
    Using /tmp/eessi.abc123defg as tmp storage (add '--resume /tmp/eessi.abc123defg' to resume where this session ended).$\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nlinux/aarch64/generic\nlinux/aarch64/graviton2\nlinux/aarch64/graviton3\nlinux/ppc64le/generic\nlinux/ppc64le/power9le\nlinux/x86_64/amd/zen2\nlinux/x86_64/amd/zen3\nlinux/x86_64/generic\nlinux/x86_64/intel/haswell\nlinux/x86_64/intel/skylake_avx512\n
    Lines 6 to 15 show the output of the script eessi_architectures.sh.

    If you want to use the mode run, you have to make the script's location available inside the container.

    This can be done by mapping the current directory (${PWD}), which contains eessi_architectures.sh, to any not-yet existing directory inside the container using the $SINGULARITY_BIND or $APPTAINER_BIND environment variable.

    For example:

    SINGULARITY_BIND=${PWD}:/scripts ./eessi_container.sh --mode run /scripts/eessi_architectures.sh\n

    "},{"location":"getting_access/eessi_container/#running-scripts-or-commands-with-parameters-starting-with-or-","title":"Running scripts or commands with parameters starting with - or --","text":"

    Let's assume we would like to get more information about the entries of /cvmfs/software.eessi.io. If we just run

    ./eessi_container.sh --mode run ls -lH /cvmfs/software.eessi.io\n
    we would get an error message such as
    ERROR: Unknown option: -lH\n
    We can resolve this in two ways:

    1. Using the stdin channel as described above, for example, by simply running
      CMD=\"ls -lH /cvmfs/software.eessi.io\"\n./eessi_container.sh <<< ${CMD}\n
      which should result in the output similar to
      Using /tmp/eessi.abc123defg as tmp directory (to resume session add '--resume /tmp/eessi.abc123defg').\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nfuse: failed to clone device fd: Inappropriate ioctl for device\nfuse: trying to continue without -o clone_fd.\ntotal 10\nlrwxrwxrwx 1 user user   10 Jun 30  2021 host_injections -> /opt/eessi\nlrwxrwxrwx 1 user user   16 May  4  2022 latest -> versions/2021.12\ndrwxr-xr-x 3 user user 4096 Dec 10  2021 versions\n
    2. Using the flag terminator -- which tells eessi_container.sh to stop parsing command line arguments. For example,
      ./eessi_container.sh --mode run -- ls -lH /cvmfs/software.eessi.io\n
      which should result in the output similar to
      Using /tmp/eessi.abc123defg as tmp directory (to resume session add '--resume /tmp/eessi.abc123defg').\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q run --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif ls -lH /cvmfs/software.eessi.io\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nfuse: failed to clone device fd: Inappropriate ioctl for device\nfuse: trying to continue without -o clone_fd.\ntotal 10\nlrwxrwxrwx 1 user user   10 Jun 30  2021 host_injections -> /opt/eessi\nlrwxrwxrwx 1 user user   16 May  4  2022 latest -> versions/2021.12\ndrwxr-xr-x 3 user user 4096 Dec 10  2021 versions\n
    "},{"location":"getting_access/eessi_container/#running-eessi-demos","title":"Running EESSI demos","text":"

    For examples of scripts that use the software provided by EESSI, see Running EESSI demos.

    "},{"location":"getting_access/eessi_container/#launching-containers-more-quickly","title":"Launching containers more quickly","text":"

    Subsequent runs of eessi_container.sh may reuse temporary data of a previous session, which includes the pulled image of the container. However, reusing a previous session is not always what we want, even if it would let us launch the container more quickly.

    The eessi_container.sh script may (re)-use a cache directory provided via $SINGULARITY_CACHEDIR (or $APPTAINER_CACHEDIR when using Apptainer). Hence, the container image does not have to be downloaded again even when starting a new session. The example below illustrates this.

    export SINGULARITY_CACHEDIR=${PWD}/container_cache_dir\ntime ./eessi_container.sh <<< \"ls /cvmfs/software.eessi.io\"\n
    which should produce output similar to
    Using /tmp/eessi.abc123defg as tmp directory (to resume session add '--resume /tmp/eessi.abc123defg').\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nfuse: failed to clone device fd: Inappropriate ioctl for device\nfuse: trying to continue without -o clone_fd.\nhost_injections  latest  versions\n\nreal    m40.445s\nuser    3m2.621s\nsys     0m7.402s\n
    The next run using the same cache directory, e.g., by simply executing
    time ./eessi_container.sh <<< \"ls /cvmfs/software.eessi.io\"\n
    is much faster
    Using /tmp/eessi.abc123defg as tmp directory (to resume session add '--resume /tmp/eessi.abc123defg').\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nfuse: failed to clone device fd: Inappropriate ioctl for device\nfuse: trying to continue without -o clone_fd.\nhost_injections  latest  versions\n\nreal    0m2.781s\nuser    0m0.172s\nsys     0m0.436s\n

    Note

    Each run of eessi_container.sh (without specifying --resume) creates a new temporary directory. The temporary directory stores, among other data, the image file of the container. Thus we can ensure that the container is available locally for a subsequent run.

    However, this may quickly consume scarce resources, for example, a small partition where /tmp is located (default for temporary storage, see --help for specifying a different location).

    See next section for making sure to clean up no longer needed temporary data.

    "},{"location":"getting_access/eessi_container/#reducing-disk-usage","title":"Reducing disk usage","text":"

    By default eessi_container.sh creates a temporary directory under /tmp. The directories are named eessi.RANDOM where RANDOM is a 10-character string. The script does not automatically remove these directories. To determine their total disk usage, simply run

    du -sch /tmp/eessi.*\n
    which could result in output similar to
    333M    /tmp/eessi.session123\n333M    /tmp/eessi.session456\n333M    /tmp/eessi.session789\n997M    total\n
    Clean up disk usage by simply removing directories you do not need any longer.
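
    For example, to remove all of these temporary directories at once (make sure none of them belong to a session you may still want to resume):

    rm -rf /tmp/eessi.*\n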

    "},{"location":"getting_access/eessi_container/#eessi-container-image","title":"EESSI container image","text":"

    If you would like to directly use an EESSI container image, you can do so by configuring apptainer to correctly mount the CVMFS repository:

    # honor $TMPDIR if it is already defined, use /tmp otherwise\nif [ -z $TMPDIR ]; then\n    export WORKDIR=/tmp/$USER\nelse\n    export WORKDIR=$TMPDIR/$USER\nfi\n\nmkdir -p ${WORKDIR}/{var-lib-cvmfs,var-run-cvmfs,home}\nexport SINGULARITY_BIND=\"${WORKDIR}/var-run-cvmfs:/var/run/cvmfs,${WORKDIR}/var-lib-cvmfs:/var/lib/cvmfs\"\nexport SINGULARITY_HOME=\"${WORKDIR}/home:/home/$USER\"\nexport EESSI_REPO=\"container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io\"\nexport EESSI_CONTAINER=\"docker://ghcr.io/eessi/client:centos7\"\nsingularity shell --fusemount \"$EESSI_REPO\" \"$EESSI_CONTAINER\"\n
    "},{"location":"getting_access/is_eessi_accessible/","title":"Is EESSI accessible?","text":"

    EESSI can be accessed via a native (CernVM-FS) installation, or via a container that includes CernVM-FS.

    Before you look into these options, check if EESSI is already accessible on your system.

    Run the following command:

    ls /cvmfs/software.eessi.io\n

    Note

    This ls command may take a couple of seconds to finish, since CernVM-FS may need to download or update the metadata for that directory.

    If you see output like shown below, you already have access to EESSI on your system.

    host_injections  latest  versions\n

    For starting to use EESSI, continue reading about Setting up environment.

    If you see an error message as shown below, EESSI is not yet accessible on your system.

    ls: /cvmfs/software.eessi.io: No such file or directory\n
    No worries, you can still get access to EESSI.

    Continue reading about the Native installation of EESSI, or access via the EESSI container.

    "},{"location":"getting_access/native_installation/","title":"Native installation","text":"

    Setting up native access to EESSI, that is a system-wide deployment that does not require workarounds like using a container, requires the installation and configuration of CernVM-FS.

    This requires admin privileges, since you need to install CernVM-FS as an OS package.

    The following actions must be taken for a (basic) native installation of EESSI:

    • Installing CernVM-FS itself, ideally using the OS packages provided by the CernVM-FS project (although installing from source is also possible);
    • Installing the EESSI configuration for CernVM-FS, which can be done by installing the cvmfs-config-eessi package that we provide for the most popular Linux distributions (more information available here);
    • Creating a small client configuration file for CernVM-FS (/etc/cvmfs/default.local); see also the CernVM-FS documentation.

    The good news is that all of this only requires a handful of commands:

    RHEL-based Linux distributions / Debian-based Linux distributions
    # Installation commands for RHEL-based distros like CentOS, Rocky Linux, Almalinux, Fedora, ...\n\n# install CernVM-FS\nsudo yum install -y https://ecsft.cern.ch/dist/cvmfs/cvmfs-release/cvmfs-release-latest.noarch.rpm\nsudo yum install -y cvmfs\n\n# install EESSI configuration for CernVM-FS\nsudo yum install -y https://github.com/EESSI/filesystem-layer/releases/download/latest/cvmfs-config-eessi-latest.noarch.rpm\n\n# create client configuration file for CernVM-FS (no squid proxy, 10GB local CernVM-FS client cache)\nsudo bash -c \"echo 'CVMFS_CLIENT_PROFILE=\"single\"' > /etc/cvmfs/default.local\"\nsudo bash -c \"echo 'CVMFS_QUOTA_LIMIT=10000' >> /etc/cvmfs/default.local\"\n\n# make sure that EESSI CernVM-FS repository is accessible\nsudo cvmfs_config setup\n
    # Installation commands for Debian-based distros like Ubuntu, ...\n\n# install CernVM-FS\nsudo apt-get install lsb-release\nwget https://ecsft.cern.ch/dist/cvmfs/cvmfs-release/cvmfs-release-latest_all.deb\nsudo dpkg -i cvmfs-release-latest_all.deb\nrm -f cvmfs-release-latest_all.deb\nsudo apt-get update\nsudo apt-get install -y cvmfs\n\n# install EESSI configuration for CernVM-FS\nwget https://github.com/EESSI/filesystem-layer/releases/download/latest/cvmfs-config-eessi_latest_all.deb\nsudo dpkg -i cvmfs-config-eessi_latest_all.deb\n\n# create client configuration file for CernVM-FS (no squid proxy, 10GB local CernVM-FS client cache)\nsudo bash -c \"echo 'CVMFS_CLIENT_PROFILE=\"single\"' > /etc/cvmfs/default.local\"\nsudo bash -c \"echo 'CVMFS_QUOTA_LIMIT=10000' >> /etc/cvmfs/default.local\"\n\n# make sure that EESSI CernVM-FS repository is accessible\nsudo cvmfs_config setup\n

    Note

    The commands above only cover the basic installation of EESSI.

    This is good enough for an individual client, or for testing purposes, but for a production-quality setup you should also set up a Squid proxy cache.

    For large-scale systems, like an HPC cluster, you should also consider setting up your own CernVM-FS Stratum-1 mirror server.

    For more details on this, please refer to the Stratum 1 and proxies section of the CernVM-FS tutorial.
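    As a rough illustration of what such a production-oriented client configuration could look like, /etc/cvmfs/default.local might point to a site-local Squid proxy instead of using the single-client profile; the proxy hostname and port below are placeholders, not actual EESSI infrastructure:

    # example /etc/cvmfs/default.local for clients behind a site-local Squid proxy (placeholder URL)\nCVMFS_HTTP_PROXY=\"http://squid-proxy.example.org:3128\"\nCVMFS_QUOTA_LIMIT=10000\n\n# apply the new configuration\nsudo cvmfs_config reload\n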

    "},{"location":"meetings/2022-09-amsterdam/","title":"EESSI Community Meeting (Sept'22, Amsterdam)","text":""},{"location":"meetings/2022-09-amsterdam/#practical-info","title":"Practical info","text":"
    • dates: Wed-Fri 14-16 Sept'22
    • in conjunction with CernVM workshop @ Nikhef (Mon-Tue 12-13 Sept'22)
    • venue: \"Polderzaal\" at Cafe-Restaurant Polder (Google Maps), sponsored by SURF
    • registration (closed since Fri 9 Sept'22)
    • Slack channel: community-meeting-2022 in EESSI Slack
    • YouTube playlist with recorded talks
    "},{"location":"meetings/2022-09-amsterdam/#agenda","title":"Agenda","text":"

    (subject to changes)

    We envision a mix of presentations, experience reports, demos, and hands-on sessions and/or hackathons related to the EESSI project.

    If you would like to give a talk or host a session, please let us know via the EESSI Slack!

    "},{"location":"meetings/2022-09-amsterdam/#wed-14-sept-2022","title":"Wed 14 Sept 2022","text":"
    • [10:00-13:00] Welcome session
      • [10:00-10:30] Walk-in, coffee
      • [10:30-12:00] Round table discussion (not live-streamed!)
    • [12:00-13:00] Lunch
    • [13:00-15:00] Presentations on EESSI
      • [13:00-13:30] Introduction to EESSI (Caspar) [slides - recording]
      • [13:30-14:00] Hands-on: how to use EESSI (Kenneth) [slides - recording]
      • [14:00-14:30] EESSI use cases (Kenneth) [slides - recording]
      • [14:30-15:00] EESSI for sysadmins (Thomas) [slides - recording]
    • [15:00-15:30] Coffee break
    • [15:30-17:00] Presentations on EESSI (continued)
      • [15:30-16:00] Hands-on: installing EESSI (Thomas/Kenneth)
      • [16:00-16:45] ComputeCanada site talk (Bart Oldeman, remote) [slides - recording]
      • [16:45-17:15] Magic Castle (Felix-Antoine Fortin, remote) [slides - recording]
    • [19:00-...] Group dinner @ Saravanaa Bhavan (sponsored by Dell Technologies)
      • address: Stadhouderskade 123-124, Amsterdam
    "},{"location":"meetings/2022-09-amsterdam/#thu-15-sept-2022","title":"Thu 15 Sept 2022","text":"
    • [09:30-12:00] More focused presentations on aspects of EESSI
      • [09:30-10:00] EESSI behind the scenes: compat layer (Bob) [slides - recording]
      • [10:00-10:30] EESSI behind the scenes: software layer (Kenneth) [slides - recording]
      • [10:30-11:00] Coffee break
      • [11:00-11:30] EESSI behind the scenes: infrastructure (Terje) [slides - recording]
      • [11:30-12:00] Status on RISC-V support (Kenneth) [slides - recording]
    • [12:00-13:00] Lunch
    • [13:00-14:00] Discussions/hands-on sessions/hackathon
    • [14:00-14:30] Status on GPU support (Alan) [slides - recording]
    • [14:30-15:00] Status on build-and-deploy bot (Thomas) [slides - recording]
    • [15:00-15:30] Coffee break
    • [15:30-17:00] Discussions/hands-on sessions/hackathon (continued)
      • Hands-on with GPUs (Alan)
      • Hands-on with bot (Thomas/Kenneth)
    • [19:00-...] Group dinner @ Italia Oggi (sponsored by HPC-UGent)
      • address: Binnen Bantammerstraat 11, Amsterdam
    "},{"location":"meetings/2022-09-amsterdam/#fri-16-sept-2022","title":"Fri 16 Sept 2022","text":"
    • [09:30-12:00] Presentations on future work
      • [09:30-10:00] Testing in software layer (Caspar) [slides - recording]
      • [10:00-10:30] MultiXscale project (Alan) [slides - recording]
      • [10:30-11:00] Coffee break
      • [11:00-11:30] Short-term future work (Kenneth) [slides - recording]
    • [11:30-12:00] Discussion: future management structure of EESSI (Alan) [slides - recording]
    • [12:00-13:00] Lunch
    • [13:00-14:00] Site reports [recording]
      • NESSI (Thomas) [slides]
      • NLPL (Stephan) [slides]
      • HPCNow! (Danilo) [slides]
      • Azure (Hugo) [slides]
    • [14:00-14:30] Discussion: what would make or break EESSI for your site? (notes - recording)
    • [14:30-15:45] Discussions/hands-on sessions/hackathon
      • Hands-on with GPU support (Alan)
      • Hands-on with bot (Thomas/Kenneth)
      • Hands-on with software testing (Caspar)
    • We need to leave the room by 16:00!
    "},{"location":"repositories/pilot/","title":"Pilot","text":""},{"location":"repositories/pilot/#pilot-software-stack-202112","title":"Pilot software stack (2021.12)","text":""},{"location":"repositories/pilot/#caveats","title":"Caveats","text":"

    Danger

    The EESSI pilot repository is no longer actively maintained, and should not be used for production work.

    Please use the software.eessi.io repository instead.

    The current EESSI pilot software stack (version 2021.12) is the 7th iteration, and there are some known issues and limitations; please take these into account:

    • First of all: the EESSI pilot software stack is NOT READY FOR PRODUCTION!

    Do not use it for production work, and be careful when testing it on production systems!

    "},{"location":"repositories/pilot/#reporting-problems","title":"Reporting problems","text":"

    If you notice any problems, please report them via https://github.com/EESSI/software-layer/issues.

    "},{"location":"repositories/pilot/#accessing-the-eessi-pilot-repository-through-singularity","title":"Accessing the EESSI pilot repository through Singularity","text":"

    The easiest way to access the EESSI pilot repository is by using Singularity. If Singularity is already installed, no admin privileges are required, and no other software is needed on the host.

    A container image is available in the GitHub Container Registry (see https://github.com/EESSI/filesystem-layer/pkgs/container/client-pilot). It only contains a minimal operating system + the necessary packages to access the EESSI pilot repository through CernVM-FS, and it is suitable for aarch64, ppc64le, and x86_64.

    The container image can be used directly by Singularity (no prior download required), as follows:

    • First, create some local directories in /tmp/$USER which will be bind mounted in the container:

      mkdir -p /tmp/$USER/{var-lib-cvmfs,var-run-cvmfs,home}\n
      These provide space for the CernVM-FS cache and an empty home directory to use in the container.

    • Set the $SINGULARITY_BIND and $SINGULARITY_HOME environment variables to configure Singularity:

      export SINGULARITY_BIND=\"/tmp/$USER/var-run-cvmfs:/var/run/cvmfs,/tmp/$USER/var-lib-cvmfs:/var/lib/cvmfs\"\nexport SINGULARITY_HOME=\"/tmp/$USER/home:/home/$USER\"\n

    • Start the container with singularity shell, using --fusemount to mount the EESSI pilot repository (via the cvmfs2 command that is included in the container image):

      export EESSI_PILOT=\"container:cvmfs2 pilot.eessi-hpc.org /cvmfs/pilot.eessi-hpc.org\"\nsingularity shell --fusemount \"$EESSI_PILOT\" docker://ghcr.io/eessi/client-pilot:centos7\n

    • This should give you a shell in the container, where the EESSI pilot repository is mounted:

      $ singularity shell --fusemount \"$EESSI_PILOT\" docker://ghcr.io/eessi/client-pilot:centos7\nINFO:    Using cached SIF image\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nSingularity>\n

    • You may see some scary-looking warnings, but those can be ignored for now.

    To verify that things are working, check the contents of the /cvmfs/pilot.eessi-hpc.org/versions/2021.12 directory:

    Singularity> ls /cvmfs/pilot.eessi-hpc.org/versions/2021.12\ncompat  init  software\n

    "},{"location":"repositories/pilot/#standard-installation","title":"Standard installation","text":"

    For those with privileges on their system, there are a number of example installation scripts for different architectures and operating systems available in the EESSI demo repository.

    Here we prefer the Singularity approach as we can guarantee that the container image is up to date.

    "},{"location":"repositories/pilot/#setting-up-the-eessi-environment","title":"Setting up the EESSI environment","text":"

    Once you have the EESSI pilot repository mounted, you can set up the environment by sourcing the provided init script:

    source /cvmfs/pilot.eessi-hpc.org/versions/2021.12/init/bash\n

    If all goes well, you should see output like this:

    Found EESSI pilot repo @ /cvmfs/pilot.eessi-hpc.org/versions/2021.12!\nUsing x86_64/intel/haswell as software subdirectory.\nUsing /cvmfs/pilot.eessi-hpc.org/versions/2021.12/software/linux/x86_64/intel/haswell/modules/all as the directory to be added to MODULEPATH.\nFound Lmod configuration file at /cvmfs/pilot.eessi-hpc.org/versions/2021.12/software/linux/x86_64/intel/haswell/.lmod/lmodrc.lua\nInitializing Lmod...\nPrepending /cvmfs/pilot.eessi-hpc.org/versions/2021.12/software/linux/x86_64/intel/haswell/modules/all to $MODULEPATH...\nEnvironment set up to use EESSI pilot software stack, have fun!\n[EESSI pilot 2021.12] $ \n

    Now you're all set up! Go ahead and explore the software stack using \"module avail\", and go wild with testing the available software installations!
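    For example, to load one of the provided applications (a GROMACS module taken from the module listing shown further down this page) and check that it runs, you could do something like:

    module load GROMACS/2020.4-foss-2020a-Python-3.8.2\ngmx --version\n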

    "},{"location":"repositories/pilot/#testing-the-eessi-pilot-software-stack","title":"Testing the EESSI pilot software stack","text":"

    Please test the EESSI pilot software stack as you see fit: running simple commands, performing small calculations or running small benchmarks, etc.

    Test scripts that have been verified to work correctly using the pilot software stack are available at https://github.com/EESSI/software-layer/tree/main/tests .

    "},{"location":"repositories/pilot/#giving-feedback-or-reporting-problems","title":"Giving feedback or reporting problems","text":"

    Any feedback, questions, or problem reports are welcome through one of the EESSI communication channels:

    • (preferred!) EESSI software-layer GitHub repository: https://github.com/EESSI/software-layer/issues
    • EESSI mailing list (eessi@list.rug.nl)
    • EESSI Slack: https://eessi-hpc.slack.com (get an invite via https://www.eessi-hpc.org/join)
    • monthly EESSI meetings (first Thursday of the month at 2pm CEST)
    "},{"location":"repositories/pilot/#available-software","title":"Available software","text":"

    (last update: Mar 21st 2022)

    EESSI currently supports the following HPC applications as well as all their dependencies:

    • GROMACS (2020.1 and 2020.4)
    • OpenFOAM (v2006 and 8)
    • R (4.0.0) + R-bundle-Bioconductor (3.11) + RStudio Server (1.3.1093)
    • TensorFlow (2.3.1) and Horovod (0.21.3)
    • OSU-Micro-Benchmarks (5.6.3)
    • ReFrame (3.9.1)
    • Spark (3.1.1)
    • IPython (7.15.0)
    • QuantumESPRESSO (6.6) (currently not available on ppc64le)
    • WRF (3.9.1.1)
    [EESSI pilot 2021.12] $ module --nx avail\n\n--------------------------- /cvmfs/pilot.eessi-hpc.org/versions/2021.12/software/linux/x86_64/intel/haswell/modules/all ----------------------------\n   ant/1.10.8-Java-11                                              LMDB/0.9.24-GCCcore-9.3.0\n   Arrow/0.17.1-foss-2020a-Python-3.8.2                            lz4/1.9.2-GCCcore-9.3.0\n   Bazel/3.6.0-GCCcore-9.3.0                                       Mako/1.1.2-GCCcore-9.3.0\n   Bison/3.5.3-GCCcore-9.3.0                                       MariaDB-connector-c/3.1.7-GCCcore-9.3.0\n   Boost/1.72.0-gompi-2020a                                        matplotlib/3.2.1-foss-2020a-Python-3.8.2\n   cairo/1.16.0-GCCcore-9.3.0                                      Mesa/20.0.2-GCCcore-9.3.0\n   CGAL/4.14.3-gompi-2020a-Python-3.8.2                            Meson/0.55.1-GCCcore-9.3.0-Python-3.8.2\n   CMake/3.16.4-GCCcore-9.3.0                                      METIS/5.1.0-GCCcore-9.3.0\n   CMake/3.20.1-GCCcore-10.3.0                                     MPFR/4.0.2-GCCcore-9.3.0\n   code-server/3.7.3                                               NASM/2.14.02-GCCcore-9.3.0\n   DB/18.1.32-GCCcore-9.3.0                                        ncdf4/1.17-foss-2020a-R-4.0.0\n   DB/18.1.40-GCCcore-10.3.0                                       netCDF-Fortran/4.5.2-gompi-2020a\n   double-conversion/3.1.5-GCCcore-9.3.0                           netCDF/4.7.4-gompi-2020a\n   Doxygen/1.8.17-GCCcore-9.3.0                                    nettle/3.6-GCCcore-9.3.0\n   EasyBuild/4.5.0                                                 networkx/2.4-foss-2020a-Python-3.8.2\n   EasyBuild/4.5.1                                         (D)     Ninja/1.10.0-GCCcore-9.3.0\n   Eigen/3.3.7-GCCcore-9.3.0                                       NLopt/2.6.1-GCCcore-9.3.0\n   Eigen/3.3.9-GCCcore-10.3.0                                      NSPR/4.25-GCCcore-9.3.0\n   ELPA/2019.11.001-foss-2020a                                     NSS/3.51-GCCcore-9.3.0\n   expat/2.2.9-GCCcore-9.3.0                                       nsync/1.24.0-GCCcore-9.3.0\n   expat/2.2.9-GCCcore-10.3.0                                      numactl/2.0.13-GCCcore-9.3.0\n   FFmpeg/4.2.2-GCCcore-9.3.0                                      numactl/2.0.14-GCCcore-10.3.0\n   FFTW/3.3.8-gompi-2020a                                          OpenBLAS/0.3.9-GCC-9.3.0\n   FFTW/3.3.9-gompi-2021a                                          OpenBLAS/0.3.15-GCC-10.3.0\n   flatbuffers/1.12.0-GCCcore-9.3.0                                OpenFOAM/v2006-foss-2020a\n   FlexiBLAS/3.0.4-GCC-10.3.0                                      OpenFOAM/8-foss-2020a                              (D)\n   fontconfig/2.13.92-GCCcore-9.3.0                                OpenMPI/4.0.3-GCC-9.3.0\n   foss/2020a                                                      OpenMPI/4.1.1-GCC-10.3.0\n   foss/2021a                                                      OpenPGM/5.2.122-GCCcore-9.3.0\n   freetype/2.10.1-GCCcore-9.3.0                                   OpenSSL/1.1                                        (D)\n   FriBidi/1.0.9-GCCcore-9.3.0                                     OSU-Micro-Benchmarks/5.6.3-gompi-2020a\n   GCC/9.3.0                                                       Pango/1.44.7-GCCcore-9.3.0\n   GCC/10.3.0                                                      ParaView/5.8.0-foss-2020a-Python-3.8.2-mpi\n   GCCcore/9.3.0                                                   PCRE/8.44-GCCcore-9.3.0\n   
GCCcore/10.3.0                                                  PCRE2/10.34-GCCcore-9.3.0\n   Ghostscript/9.52-GCCcore-9.3.0                                  Perl/5.30.2-GCCcore-9.3.0\n   giflib/5.2.1-GCCcore-9.3.0                                      Perl/5.32.1-GCCcore-10.3.0\n   git/2.23.0-GCCcore-9.3.0-nodocs                                 pixman/0.38.4-GCCcore-9.3.0\n   git/2.32.0-GCCcore-10.3.0-nodocs                        (D)     pkg-config/0.29.2-GCCcore-9.3.0\n   GLib/2.64.1-GCCcore-9.3.0                                       pkg-config/0.29.2-GCCcore-10.3.0\n   GLPK/4.65-GCCcore-9.3.0                                         pkg-config/0.29.2                                  (D)\n   GMP/6.2.0-GCCcore-9.3.0                                         pkgconfig/1.5.1-GCCcore-9.3.0-Python-3.8.2\n   GMP/6.2.1-GCCcore-10.3.0                                        PMIx/3.1.5-GCCcore-9.3.0\n   gnuplot/5.2.8-GCCcore-9.3.0                                     PMIx/3.2.3-GCCcore-10.3.0\n   GObject-Introspection/1.64.0-GCCcore-9.3.0-Python-3.8.2         poetry/1.0.9-GCCcore-9.3.0-Python-3.8.2\n   gompi/2020a                                                     protobuf-python/3.13.0-foss-2020a-Python-3.8.2\n   gompi/2021a                                                     protobuf/3.13.0-GCCcore-9.3.0\n   groff/1.22.4-GCCcore-9.3.0                                      pybind11/2.4.3-GCCcore-9.3.0-Python-3.8.2\n   groff/1.22.4-GCCcore-10.3.0                                     pybind11/2.6.2-GCCcore-10.3.0\n   GROMACS/2020.1-foss-2020a-Python-3.8.2                          Python/2.7.18-GCCcore-9.3.0\n   GROMACS/2020.4-foss-2020a-Python-3.8.2                  (D)     Python/3.8.2-GCCcore-9.3.0\n   GSL/2.6-GCC-9.3.0                                               Python/3.9.5-GCCcore-10.3.0-bare\n   gzip/1.10-GCCcore-9.3.0                                         Python/3.9.5-GCCcore-10.3.0\n   h5py/2.10.0-foss-2020a-Python-3.8.2                             PyYAML/5.3-GCCcore-9.3.0\n   HarfBuzz/2.6.4-GCCcore-9.3.0                                    Qt5/5.14.1-GCCcore-9.3.0\n   HDF5/1.10.6-gompi-2020a                                         QuantumESPRESSO/6.6-foss-2020a\n   Horovod/0.21.3-foss-2020a-TensorFlow-2.3.1-Python-3.8.2         R-bundle-Bioconductor/3.11-foss-2020a-R-4.0.0\n   hwloc/2.2.0-GCCcore-9.3.0                                       R/4.0.0-foss-2020a\n   hwloc/2.4.1-GCCcore-10.3.0                                      re2c/1.3-GCCcore-9.3.0\n   hypothesis/6.13.1-GCCcore-10.3.0                                RStudio-Server/1.3.1093-foss-2020a-Java-11-R-4.0.0\n   ICU/66.1-GCCcore-9.3.0                                          Rust/1.52.1-GCCcore-10.3.0\n   ImageMagick/7.0.10-1-GCCcore-9.3.0                              ScaLAPACK/2.1.0-gompi-2020a\n   IPython/7.15.0-foss-2020a-Python-3.8.2                          ScaLAPACK/2.1.0-gompi-2021a-fb\n   JasPer/2.0.14-GCCcore-9.3.0                                     scikit-build/0.10.0-foss-2020a-Python-3.8.2\n   Java/11.0.2                                             (11)    SciPy-bundle/2020.03-foss-2020a-Python-3.8.2\n   jbigkit/2.1-GCCcore-9.3.0                                       SciPy-bundle/2021.05-foss-2021a\n   JsonCpp/1.9.4-GCCcore-9.3.0                                     SCOTCH/6.0.9-gompi-2020a\n   LAME/3.100-GCCcore-9.3.0                                        snappy/1.1.8-GCCcore-9.3.0\n   libarchive/3.5.1-GCCcore-10.3.0                                 Spark/3.1.1-foss-2020a-Python-3.8.2\n   libcerf/1.13-GCCcore-9.3.0        
                              SQLite/3.31.1-GCCcore-9.3.0\n   libdrm/2.4.100-GCCcore-9.3.0                                    SQLite/3.35.4-GCCcore-10.3.0\n   libevent/2.1.11-GCCcore-9.3.0                                   SWIG/4.0.1-GCCcore-9.3.0\n   libevent/2.1.12-GCCcore-10.3.0                                  Szip/2.1.1-GCCcore-9.3.0\n   libfabric/1.11.0-GCCcore-9.3.0                                  Tcl/8.6.10-GCCcore-9.3.0\n   libfabric/1.12.1-GCCcore-10.3.0                                 Tcl/8.6.11-GCCcore-10.3.0\n   libffi/3.3-GCCcore-9.3.0                                        tcsh/6.22.02-GCCcore-9.3.0\n   libffi/3.3-GCCcore-10.3.0                                       TensorFlow/2.3.1-foss-2020a-Python-3.8.2\n   libgd/2.3.0-GCCcore-9.3.0                                       time/1.9-GCCcore-9.3.0\n   libGLU/9.0.1-GCCcore-9.3.0                                      Tk/8.6.10-GCCcore-9.3.0\n   libglvnd/1.2.0-GCCcore-9.3.0                                    Tkinter/3.8.2-GCCcore-9.3.0\n   libiconv/1.16-GCCcore-9.3.0                                     UCX/1.8.0-GCCcore-9.3.0\n   libjpeg-turbo/2.0.4-GCCcore-9.3.0                               UCX/1.10.0-GCCcore-10.3.0\n   libpciaccess/0.16-GCCcore-9.3.0                                 UDUNITS/2.2.26-foss-2020a\n   libpciaccess/0.16-GCCcore-10.3.0                                UnZip/6.0-GCCcore-9.3.0\n   libpng/1.6.37-GCCcore-9.3.0                                     UnZip/6.0-GCCcore-10.3.0\n   libsndfile/1.0.28-GCCcore-9.3.0                                 WRF/3.9.1.1-foss-2020a-dmpar\n   libsodium/1.0.18-GCCcore-9.3.0                                  X11/20200222-GCCcore-9.3.0\n   LibTIFF/4.1.0-GCCcore-9.3.0                                     x264/20191217-GCCcore-9.3.0\n   libtirpc/1.2.6-GCCcore-9.3.0                                    x265/3.3-GCCcore-9.3.0\n   libunwind/1.3.1-GCCcore-9.3.0                                   xorg-macros/1.19.2-GCCcore-9.3.0\n   libxc/4.3.4-GCC-9.3.0                                           xorg-macros/1.19.3-GCCcore-10.3.0\n   libxml2/2.9.10-GCCcore-9.3.0                                    Xvfb/1.20.9-GCCcore-9.3.0\n   libxml2/2.9.10-GCCcore-10.3.0                                   Yasm/1.3.0-GCCcore-9.3.0\n   libyaml/0.2.2-GCCcore-9.3.0                                     ZeroMQ/4.3.2-GCCcore-9.3.0\n   LittleCMS/2.9-GCCcore-9.3.0                                     Zip/3.0-GCCcore-9.3.0\n   LLVM/9.0.1-GCCcore-9.3.0                                        zstd/1.4.4-GCCcore-9.3.0\n
    "},{"location":"repositories/pilot/#architecture-and-micro-architecture-support","title":"Architecture and micro-architecture support","text":""},{"location":"repositories/pilot/#x86_64","title":"x86_64","text":"
    • generic (currently implies -march=x86-64 and -mtune=generic)
    • AMD
      • zen2 (Rome)
      • zen3 (Milan)
    • Intel
      • haswell
      • skylake_avx512
    "},{"location":"repositories/pilot/#aarch64arm64","title":"aarch64/arm64","text":"
    • generic (currently implies -march=armv8-a and -mtune=generic)
    • AWS Graviton2
    "},{"location":"repositories/pilot/#ppc64le","title":"ppc64le","text":"
    • generic
    • power9le
    "},{"location":"repositories/pilot/#easybuild-configuration","title":"EasyBuild configuration","text":"

    EasyBuild v4.5.1 was used to install the software in the 2021.12 version of the pilot repository. For some installations, pull requests with changes that will be included in later EasyBuild versions were leveraged; see the build script that was used.

    An example configuration of the build environment based on https://github.com/EESSI/software-layer can be seen here:

    $ eb --show-config\n#\n# Current EasyBuild configuration\n# (C: command line argument, D: default value, E: environment variable, F: configuration file)\n#\nbuildpath         (E) = /tmp/eessi-build/easybuild/build\ncontainerpath     (E) = /tmp/eessi-build/easybuild/containers\ndebug             (E) = True\nfilter-deps       (E) = Autoconf, Automake, Autotools, binutils, bzip2, cURL, DBus, flex, gettext, gperf, help2man, intltool, libreadline, libtool, Lua, M4, makeinfo, ncurses, util-linux, XZ, zlib\nfilter-env-vars   (E) = LD_LIBRARY_PATH\nhooks             (E) = /home/eessi-build/software-layer/eb_hooks.py\nignore-osdeps     (E) = True\ninstallpath       (E) = /cvmfs/pilot.eessi-hpc.org/2021.06/software/linux/x86_64/intel/haswell\nmodule-extensions (E) = True\npackagepath       (E) = /tmp/eessi-build/easybuild/packages\nprefix            (E) = /tmp/eessi-build/easybuild\nrepositorypath    (E) = /tmp/eessi-build/easybuild/ebfiles_repo\nrobot-paths       (D) = /cvmfs/pilot.eessi-hpc.org/versions/2021.12/software/linux/x86_64/intel/haswell/software/EasyBuild/4.5.1/easybuild/easyconfigs\nrpath             (E) = True\nsourcepath        (E) = /tmp/eessi-build/easybuild/sources:\nsysroot           (E) = /cvmfs/pilot.eessi-hpc.org/versions/2021.12/compat/linux/x86_64\ntrace             (E) = True\nzip-logs          (E) = bzip2\n

    "},{"location":"repositories/pilot/#infrastructure-status","title":"Infrastructure status","text":"

    The status of the CernVM-FS infrastructure for the pilot repository is shown at http://status.eessi.io/pilot/.

    "},{"location":"repositories/riscv.eessi.io/","title":"EESSI RISC-V development repository (riscv.eessi.io)","text":"

    This repository contains development versions of an EESSI RISC-V software stack. Note that versions may be added, modified, or deleted at any time.

    "},{"location":"repositories/riscv.eessi.io/#accessing-the-risc-v-repository","title":"Accessing the RISC-V repository","text":"

    See Getting access; by making the EESSI CVMFS domain available, you will automatically have access to riscv.eessi.io as well.

    "},{"location":"repositories/riscv.eessi.io/#using-riscveessiio","title":"Using riscv.eessi.io","text":"

    This repository currently offers one version (20240402), which contains both a compatibility layer and a software layer. Furthermore, initialization scripts are in place to set up the repository:

    $ source /cvmfs/riscv.eessi.io/versions/20240402/init/bash\nFound EESSI repo @ /cvmfs/riscv.eessi.io/versions/20240402!\narchdetect says riscv64/generic\nUsing riscv64/generic as software subdirectory.\nFound Lmod configuration file at /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/.lmod/lmodrc.lua\nFound Lmod SitePackage.lua file at /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/.lmod/SitePackage.lua\nUsing /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/modules/all as the directory to be added to MODULEPATH.\nInitializing Lmod...\nPrepending /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/modules/all to $MODULEPATH...\nEnvironment set up to use EESSI (20240402), have fun!\n{EESSI 20240402} $\n

    You can even source the initialization script of the software.eessi.io production repository now, and it will automatically set up the RISC-V repository for you:

    $ source /cvmfs/software.eessi.io/versions/2023.06/init/bash \nRISC-V architecture detected, but there is no RISC-V support yet in the production repository.\nAutomatically switching to version 20240402 of the RISC-V development repository /cvmfs/riscv.eessi.io.\nFor more details about this repository, see https://www.eessi.io/docs/repositories/riscv.eessi.io/.\n\nFound EESSI repo @ /cvmfs/riscv.eessi.io/versions/20240402!\narchdetect says riscv64/generic\nUsing riscv64/generic as software subdirectory.\nFound Lmod configuration file at /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/.lmod/lmodrc.lua\nFound Lmod SitePackage.lua file at /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/.lmod/SitePackage.lua\nUsing /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/modules/all as the directory to be added to MODULEPATH.\nUsing /cvmfs/riscv.eessi.io/host_injections/20240402/software/linux/riscv64/generic/modules/all as the site extension directory to be added to MODULEPATH.\nInitializing Lmod...\nPrepending /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/modules/all to $MODULEPATH...\nPrepending site path /cvmfs/riscv.eessi.io/host_injections/20240402/software/linux/riscv64/generic/modules/all to $MODULEPATH...\nEnvironment set up to use EESSI (20240402), have fun!\n{EESSI 20240402} $ \n

    Note that we currently only provide generic builds, hence riscv64/generic is being used for all RISC-V CPUs.

    The amount of software is constantly increasing. Besides having the foss/2023b toolchain available, applications like dlb, GROMACS, OSU Micro-Benchmarks, and R are already available as well. Use module avail to get a full and up-to-date listing of available software.
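    For example, a quick way to check that the software layer works on your RISC-V machine is to load one of the applications listed above and ask it for its version (the module name below is based on that list; use module avail to see the exact versions that are currently provided):

    module load R\nR --version\n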

    "},{"location":"repositories/riscv.eessi.io/#infrastructure-status","title":"Infrastructure status","text":"

    The status of the CernVM-FS infrastructure for this repository is shown at https://status.eessi.io.

    "},{"location":"repositories/software.eessi.io/","title":"Production EESSI repository (software.eessi.io)","text":""},{"location":"repositories/software.eessi.io/#question-or-problems","title":"Question or problems","text":"

    If you have any questions regarding EESSI, or if you experience a problem in accessing or using it, please open a support request.

    "},{"location":"repositories/software.eessi.io/#accessing-the-eessi-repository","title":"Accessing the EESSI repository","text":"

    See Getting access.

    "},{"location":"repositories/software.eessi.io/#using-softwareeessiio","title":"Using software.eessi.io","text":"

    See Using EESSI.

    "},{"location":"repositories/software.eessi.io/#available-software","title":"Available software","text":"

    Detailed overview of available software coming soon!

    For now, use module avail after initializing the EESSI environment.

    "},{"location":"repositories/software.eessi.io/#architecture-and-micro-architecture-support","title":"Architecture and micro-architecture support","text":"

    See CPU targets.

    "},{"location":"repositories/software.eessi.io/#infrastructure-status","title":"Infrastructure status","text":"

    The status of the CernVM-FS infrastructure for the production repository is shown at https://status.eessi.io.

    "},{"location":"software_layer/build_nodes/","title":"Build nodes","text":"

    Any system can be used as a build node to create additional software installations that should be added to the EESSI CernVM-FS repository.

    "},{"location":"software_layer/build_nodes/#requirements","title":"Requirements","text":"

    OS and software:

    • GNU/Linux (any distribution) as operating system;
    • a recent version of Singularity (>= 3.6 is recommended);
      • check with singularity --version
    • screen or tmux is highly recommended;

    Admin privileges are not required, as long as Singularity is installed.

    Resources:

    • 8 or more cores are recommended (though not strictly required);
    • at least 50GB of free space on a local filesystem (like /tmp);
    • at least 16GB of memory (2GB/core or higher recommended);

    Instructions to install Singularity and screen:

    CentOS 8 (x86_64 or aarch64 or ppc64le)
    sudo dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm\nsudo dnf update -y\nsudo dnf install -y screen singularity\n
    "},{"location":"software_layer/build_nodes/#setting-up-the-container","title":"Setting up the container","text":"

    Warning

    It is highly recommended to start a screen or tmux session first!

    A container image is provided that includes everything that is required to set up a writable overlay on top of the EESSI CernVM-FS repository.

    First, pick a location on a local filesystem for the temporary directory:

    Requirements:

    • Do not use a shared filesystem like NFS, Lustre or GPFS.
    • There should be at least 50GB of free disk space in this local filesystem (more is better).
    • There should be no automatic cleanup of old files via a cron job on this local filesystem.
    • Try to make sure the directory is unique (not used by anything else).

    NB: if you are going to install on a separate drive (due to lack of space on /), then you need to set some variables to point to that location, and also bind mount it in the singularity command. Let's say that your drive is mounted at /srt. Then change the relevant commands below to this:

    export EESSI_TMPDIR=/srt/$USER/EESSI\nmkdir -p $EESSI_TMPDIR\nmkdir /srt/tmp\nexport SINGULARITY_BIND=\"$EESSI_TMPDIR/var-run-cvmfs:/var/run/cvmfs,$EESSI_TMPDIR/var-lib-cvmfs:/var/lib/cvmfs,/srt/tmp:/tmp\"\nsingularity shell -B /srt --fusemount \"$EESSI_READONLY\" --fusemount \"$EESSI_WRITABLE_OVERLAY\" docker://ghcr.io/eessi/build-node:debian11\n

    We will assume that /tmp/$USER/EESSI meets these requirements:

    export EESSI_TMPDIR=/tmp/$USER/EESSI\nmkdir -p $EESSI_TMPDIR\n

    Create some subdirectories in this temporary directory:

    mkdir -p $EESSI_TMPDIR/{home,overlay-upper,overlay-work}\nmkdir -p $EESSI_TMPDIR/{var-lib-cvmfs,var-run-cvmfs}\n

    Configure Singularity cache directory, bind mounts, and (fake) home directory:

    export SINGULARITY_CACHEDIR=$EESSI_TMPDIR/singularity_cache\nexport SINGULARITY_BIND=\"$EESSI_TMPDIR/var-run-cvmfs:/var/run/cvmfs,$EESSI_TMPDIR/var-lib-cvmfs:/var/lib/cvmfs\"\nexport SINGULARITY_HOME=\"$EESSI_TMPDIR/home:/home/$USER\"\n

    Define the values to pass to the --fusemount option of the singularity command:

    export EESSI_READONLY=\"container:cvmfs2 software.eessi.io /cvmfs_ro/software.eessi.io\"\nexport EESSI_WRITABLE_OVERLAY=\"container:fuse-overlayfs -o lowerdir=/cvmfs_ro/software.eessi.io -o upperdir=$EESSI_TMPDIR/overlay-upper -o workdir=$EESSI_TMPDIR/overlay-work /cvmfs/software.eessi.io\"\n

    Start the container (which includes Debian 11, CernVM-FS and fuse-overlayfs):

    singularity shell --fusemount \"$EESSI_READONLY\" --fusemount \"$EESSI_WRITABLE_OVERLAY\" docker://ghcr.io/eessi/build-node:debian11\n

    Once the container image has been downloaded and converted to a Singularity image (SIF format), you should get a prompt like this:

    ...\nCernVM-FS: loading Fuse module... done\n\nSingularity>\n

    and the EESSI CernVM-FS repository should be mounted:

    Singularity> ls /cvmfs/software.eessi.io\nhost_injections  README.eessi  versions\n
    "},{"location":"software_layer/build_nodes/#setting-up-the-environment","title":"Setting up the environment","text":"

    Set up the environment by starting a Gentoo Prefix session using the startprefix command.

    Make sure you use the correct version of the EESSI repository!

    export EESSI_VERSION='2023.06' \n/cvmfs/software.eessi.io/versions/${EESSI_VERSION}/compat/linux/$(uname -m)/startprefix\n
    "},{"location":"software_layer/build_nodes/#installing-software","title":"Installing software","text":"

    Clone the software-layer repository:

    git clone https://github.com/EESSI/software-layer.git\n

    Run the software installation script in software-layer:

    cd software-layer\n./EESSI-install-software.sh\n

    This script will figure out the CPU microarchitecture of the host automatically (like x86_64/intel/haswell).

    To build generic software installations (like x86_64/generic), use the --generic option:

    ./EESSI-install-software.sh --generic\n

    Once all missing software has been installed, you should see a message like this:

    No missing modules!\n
    "},{"location":"software_layer/build_nodes/#creating-tarball-to-ingest","title":"Creating tarball to ingest","text":"

    Before tearing down the build node, you should create a tarball to ingest into the EESSI CernVM-FS repository.

    To create a tarball of all installations, assuming your build host is x86_64/intel/haswell:

    export EESSI_VERSION='2023.06'\ncd /cvmfs/software.eessi.io/versions/${EESSI_VERSION}/software/linux\neessi_tar_gz=\"$HOME/eessi-${EESSI_VERSION}-haswell.tar.gz\"\ntar cvfz ${eessi_tar_gz} x86_64/intel/haswell\n

    To create a tarball for specific installations, make sure you pick up both the software installation directories and the corresponding module files:

    eessi_tar_gz=\"$HOME/eessi-${EESSI_VERSION}-haswell-OpenFOAM.tar.gz\"\n\ntar cvfz ${eessi_tar_gz} x86_64/intel/haswell/software/OpenFOAM x86_64/intel/haswell/modules/all/OpenFOAM\n
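    Before uploading, it can be useful to double-check the contents of the tarball, for example by listing the first few entries:

    tar tzf ${eessi_tar_gz} | head\n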

    This tarball should be uploaded to the Stratum 0 server for ingestion. If needed, you can ask for help in the EESSI #software-layer Slack channel.

    "},{"location":"software_layer/cpu_targets/","title":"CPU targets","text":"

    In the 2023.06 version of the EESSI repository, the following CPU microarchitectures are supported.

    • aarch64/generic: fallback for Arm 64-bit CPUs (like Raspberry Pi, etc.)
    • aarch64/neoverse_n1: AWS Graviton 2, Ampere Altra, ...
    • aarch64/neoverse_v1: AWS Graviton 3
    • x86_64/generic: fallback for older Intel + AMD CPUs (like Intel Sandy Bridge, ...)
    • x86_64/amd/zen2: AMD Rome
    • x86_64/amd/zen3: AMD Milan, AMD Milan X
    • x86_64/intel/haswell: Intel Haswell, Broadwell
    • x86_64/intel/skylake_avx512: Intel Skylake, Cascade Lake, Ice Lake, ...

    The names of these CPU targets correspond to the names used by archspec.
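    To check which of these CPU targets applies to a particular host, you can look at the archdetect line printed by the EESSI initialization script, or query archspec directly; a small sketch, assuming the archspec Python package is installed on the host:

    # print the CPU microarchitecture name as detected by archspec\narchspec cpu\n\n# after sourcing the EESSI init script, the selected target should also be reflected in:\necho $EESSI_SOFTWARE_SUBDIR\n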

    "},{"location":"talks/20230615_aws_tech_short/","title":"Making scientific software EESSI - and fast","text":"

    AWS HPC Tech Short (~8 min.) - 15 June 2023

    "},{"location":"talks/2023/20230615_aws_tech_short/","title":"Making scientific software EESSI - and fast","text":"

    AWS HPC Tech Short (~8 min.) - 15 June 2023

    "},{"location":"talks/2023/20231027_packagingcon23_eessi/","title":"Streaming optimized scientific software installations on any Linux distro with EESSI","text":"
    • PackagingCon'2023 (Berlin, Germany) - 27 Oct 2023
    • presented by Kenneth Hoste & Lara Peeters (HPC-UGent)
    • slides (PDF)
    "},{"location":"talks/2023/20231204_cvmfs_hpc/","title":"Best Practices for CernVM-FS in HPC","text":"
    • online tutorial (~3h15min), 4 Dec 2023
    • presented by Kenneth Hoste (HPC-UGent)
    • tutorial website: https://multixscale.github.io/cvmfs-tutorial-hpc-best-practices
    • slides (PDF)
    "},{"location":"talks/2023/20231205_castiel2_eessi_intro/","title":"Streaming Optimised Scientific Software: an Introduction to EESSI","text":"
    • online tutorial (~1h40min) - 5 Dec 2023
    • presented by Alan O'Cais (CECAM)
    • slides (PDF)
    "},{"location":"test-suite/","title":"EESSI test suite","text":"

    The EESSI test suite is a collection of tests that are run using ReFrame. It is used to check whether the software installations included in the EESSI software layer are working and performing as expected.

    To get started, you should look into the installation and configuration guidelines first.

    To write the ReFrame configuration file for your system, check ReFrame configuration file.

    For an overview of the available software tests, see available-tests.md.

    For more information on using the EESSI test suite, see here.

    See also release notes for the EESSI test suite.
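    As a rough sketch of what getting started can look like (the package name and configuration file path below are assumptions for illustration; follow the installation and configuration guidelines linked above for the exact steps):

    # install the EESSI test suite (package name assumed here)\npip install eessi-testsuite\n\n# point ReFrame to your site configuration file (placeholder path)\nexport RFM_CONFIG_FILES=$HOME/reframe_config.py\n\n# list the tests that would be generated for this system\nreframe --list\n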

    "},{"location":"test-suite/ReFrame-configuration-file/","title":"ReFrame configuration file","text":"

    In order for ReFrame to run tests on your system, it needs to know some properties about your system. For example, it needs to know what kind of job scheduler you have, which partitions the system has, how to submit to those partitions, etc. All of this has to be described in a ReFrame configuration file (see also the section on $RFM_CONFIG_FILES above).

    This page is organized as follows:

    • Available ReFrame configuration file
    • Verifying your ReFrame configuration
    • How to write a ReFrame configuration file
    "},{"location":"test-suite/ReFrame-configuration-file/#available-reframe-configuration-file","title":"Available ReFrame configuration file","text":"

    For more inspiration, some ReFrame configuration files for HPC systems and public cloud are available in the config directory. Below is a simple ReFrame configuration file that requires only minimal changes to get you started with the test suite on a CPU partition. Please check that stagedir is set to a path on a (shared) scratch filesystem for storing (temporary) files related to the tests, and that access is set to the list of arguments that you would normally pass to the scheduler when submitting to this partition (for example '-p cpu' for submitting to a Slurm partition called cpu).

    To write a ReFrame configuration file for your system, check the section How to write a ReFrame configuration file.

    \"\"\"\nsimple ReFrame configuration file\n\"\"\"\nimport os\n\nfrom eessi.testsuite.common_config import common_logging_config, common_eessi_init, format_perfvars, perflog_format\nfrom eessi.testsuite.constants import *  \n\nsite_configuration = {\n    'systems': [\n        {\n            'name': 'cpu_partition',\n            'descr': 'CPU partition',\n            'modules_system': 'lmod',\n            'hostnames': ['*'],\n            # Note that the stagedir should be a shared directory available on all nodes running ReFrame tests\n            'stagedir': f'/some/shared/dir/{os.environ.get(\"USER\")}/reframe_output/staging',\n            'partitions': [\n                {\n                    'name': 'cpu_partition',\n                    'descr': 'CPU partition',\n                    'scheduler': 'slurm',\n                    'launcher': 'mpirun',\n                    'access':  ['-p cpu', '--export=None'],\n                    'prepare_cmds': ['source %s' % common_eessi_init()],\n                    'environs': ['default'],\n                    'max_jobs': 4,\n                    'resources': [\n                        {\n                            'name': 'memory',\n                            'options': ['--mem={size}'],\n                        }\n                    ],\n                    'features': [\n                        FEATURES[CPU]\n                    ] + list(SCALES.keys()),\n                }\n            ]\n        },\n    ],\n    'environments': [\n        {\n            'name': 'default',\n            'cc': 'cc',\n            'cxx': '',\n            'ftn': '',\n        },\n    ],\n    'logging': common_logging_config(),\n    'general': [\n        {\n            # Enable automatic detection of CPU architecture for each partition\n            # See https://reframe-hpc.readthedocs.io/en/stable/configure.html#auto-detecting-processor-information\n            'remote_detect': True,\n        }\n    ],\n}\n\n# optional logging to syslog\nsite_configuration['logging'][0]['handlers_perflog'].append({\n    'type': 'syslog',\n    'address': '/dev/log',\n    'level': 'info',\n    'format': f'reframe: {perflog_format}',\n    'format_perfvars': format_perfvars,\n    'append': True,\n})\n
    "},{"location":"test-suite/ReFrame-configuration-file/#verifying-your-reframe-configuration","title":"Verifying your ReFrame configuration","text":"

    To verify the ReFrame configuration, you can query the configuration using --show-config.

    To see the full configuration, use:

    reframe --show-config\n

    To only show the configuration of a particular system partition, you can use the --system option. To query a specific setting, you can pass an argument to --show-config.

    For example, to show the configuration of the gpu partition of the example system:

    reframe --system example:gpu --show-config systems/0/partitions\n

    You can drill it down further to only show the value of a particular configuration setting.

    For example, to only show the launcher value for the gpu partition of the example system:

    reframe --system example:gpu --show-config systems/0/partitions/@gpu/launcher\n
    "},{"location":"test-suite/ReFrame-configuration-file/#how-to-write-a-reframe-configuration-file","title":"How to write a ReFrame configuration file","text":"

    The official ReFrame documentation provides the full description of how to configure ReFrame for your site. However, there are some configuration settings that are specifically required for the EESSI test suite. Also, there is a large number of configuration settings available in ReFrame, which can make the official documentation a bit overwhelming.

    Here, we will describe how to create a configuration file that works with the EESSI test suite, starting from an example configuration file settings_example.py, which defines the most common configuration settings.

    "},{"location":"test-suite/ReFrame-configuration-file/#python-imports","title":"Python imports","text":"

    The EESSI test suite standardizes a few string-based values as constants, as well as the logging format used by ReFrame. Every ReFrame configuration file used for running the EESSI test suite should therefore start with the following import statements:

    from eessi.testsuite.common_config import common_logging_config, common_eessi_init\nfrom eessi.testsuite.constants import *\n
    "},{"location":"test-suite/ReFrame-configuration-file/#high-level-system-info-systems","title":"High-level system info (systems)","text":"

    First, we describe the system at its highest level through the systems keyword.

    You can define multiple systems in a single configuration file (systems is a Python list value). We recommend defining just a single system in each configuration file, as it makes the configuration file a bit easier to digest (for humans).

    An example of the systems section of the configuration file would be:

    site_configuration = {\n    'systems': [\n    # We could list multiple systems. Here, we just define one\n        {\n            'name': 'example',\n            'descr': 'Example cluster',\n            'modules_system': 'lmod',\n            'hostnames': ['*'],\n            'stagedir': f'/some/shared/dir/{os.environ.get(\"USER\")}/reframe_output/staging',\n            'partitions': [...],\n        }\n    ]\n}\n

    The most common configuration items defined at this level are:

    • name: The name of the system. Pick whatever makes sense for you.
    • descr: Description of the system. Again, pick whatever you like.
    • modules_system: The modules system used on your system. EESSI provides modules in lmod format. There is no need to change this, unless you want to run tests from the EESSI test suite with non-EESSI modules.
    • hostnames: The names of the hosts on which you will run the ReFrame command, as regular expression. Using these names, ReFrame can automatically determine which of the listed configurations in the systems list to use, which is useful if you're defining multiple systems in a single configuration file. If you follow our recommendation to limit yourself to one system per configuration file, simply define 'hostnames': ['*'].
    • prefix: Prefix directory for a ReFrame run on this system. Any directories or files produced by ReFrame will use this prefix, if not specified otherwise. We recommend setting the $RFM_PREFIX environment variable rather than specifying prefix in your configuration file, so our common logging configuration can pick up on it (see also $RFM_PREFIX).
    • stagedir: A shared directory that is available on all nodes that will execute ReFrame tests. This is used for storing (temporary) files related to the test. Typically, you want to set this to a path on a (shared) scratch filesystem. Defining this is optional: the default is a 'stage' directory inside the prefix directory.
    • partitions: Details on system partitions, see below.
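    As mentioned for the prefix item above, we recommend steering the ReFrame prefix through the environment rather than hard-coding it in the configuration file; for example (the directory is a placeholder):

    export RFM_PREFIX=/scratch/$USER/reframe_runs\n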
    "},{"location":"test-suite/ReFrame-configuration-file/#partitions","title":"System partitions (systems.partitions)","text":"

    The next step is to add the system partitions to the configuration files, which is also specified as a Python list since a system can have multiple partitions.

    The partitions section of the configuration for a system with two Slurm partitions (one CPU partition, and one GPU partition) could for example look something like this:

    site_configuration = {\n    'systems': [\n        {\n            ...\n            'partitions': [\n                {\n                    'name': 'cpu_partition',\n                    'descr': 'CPU partition',\n                    'scheduler': 'slurm',\n                    'prepare_cmds': ['source %s' % common_eessi_init()],\n                    'launcher': 'mpirun',\n                    'access':  ['-p cpu'],\n                    'environs': ['default'],\n                    'max_jobs': 4,\n                    'features': [\n                        FEATURES[CPU]\n                    ] + list(SCALES.keys()),\n                },\n                {\n                    'name': 'gpu_partition',\n                    'descr': 'GPU partition',\n                    'scheduler': 'slurm',\n                    'prepare_cmds': ['source %s' % common_eessi_init()],\n                    'launcher': 'mpirun',\n                    'access':  ['-p gpu'],\n                    'environs': ['default'],\n                    'max_jobs': 4,\n                    'resources': [\n                        {\n                            'name': '_rfm_gpu',\n                            'options': ['--gpus-per-node={num_gpus_per_node}'],\n                        }\n                    ],\n                    'devices': [\n                        {\n                            'type': DEVICE_TYPES[GPU],\n                            'num_devices': 4,\n                        }\n                    ],\n                    'features': [\n                        FEATURES[CPU],\n                        FEATURES[GPU],\n                    ],\n                    'extras': {\n                        GPU_VENDOR: GPU_VENDORS[NVIDIA],\n                    },\n                },\n            ]\n        }\n    ]\n}\n

    The most common configuration items defined at this level are:

    • name: The name of the partition. Pick anything you like.
    • descr: Description of the partition. Again, pick whatever you like.
    • scheduler: The scheduler used to submit to this partition, for example slurm. All valid options can be found in the ReFrame documentation.
    • launcher: The parallel launcher used on this partition, for example mpirun or srun. All valid options can be found in the ReFrame documentation.
    • access: A list of arguments that you would normally pass to the scheduler when submitting to this partition (for example '-p cpu' for submitting to a Slurm partition called cpu). If supported by your scheduler, we recommend to not export the submission environment (for example by using '--export=None' with Slurm). This avoids test failures due to environment variables set in the submission environment that are passed down to submitted jobs.
    • prepare_cmds: Commands to execute at the start of every job that runs a test. If your batch scheduler does not export the environment of the submit host, this is typically where you can initialize the EESSI environment.
    • environs: The names of the programming environments (to be defined later in the configuration file via environments) that may be used on this partition. A programming environment is required for tests that are compiled first, before they can run. The EESSI test suite however only tests existing software installations, so no compilation (or specific programming environment) is needed. Simply specify 'environs': ['default'], since ReFrame requires that a default environment is defined.
    • max_jobs: The maximum amount of jobs ReFrame is allowed to submit in parallel. Some batch systems limit how many jobs users are allowed to have in the queue. You can use this to make sure ReFrame doesn't exceed that limit.
    • resources: This field defines how additional resources can be requested in a batch job. Specifically, on a GPU partition, you have to define a resource with the name '_rfm_gpu'. The options field should then contain the argument to be passed to the batch scheduler in order to request a certain number of GPUs per node, which could be different for different batch schedulers. For example, when using Slurm you would specify:
      'resources': [\n  {\n      'name': '_rfm_gpu',\n      'options': ['--gpus-per-node={num_gpus_per_node}'],\n  },\n],\n
    • processor: We recommend to NOT define this field, unless CPU autodetection is not working for you. The EESSI test suite relies on information about your processor topology to run. Using CPU autodetection is the easiest way to ensure that all processor-related information needed by the EESSI test suite are defined. Only if CPU autodetection is failing for you do we advice you to set the processor in the partition configuration as an alternative. Although additional fields might be used by future EESSI tests, at this point you'll have to specify at least the following fields:
      'processor': {\n    'num_cpus': 64,  # Total number of CPU cores in a node\n    'num_sockets': 2,  # Number of sockets in a node\n    'num_cpus_per_socket': 32,  # Number of CPU cores per socket\n    'num_cpus_per_core': 1,  # Number of hardware threads per CPU core\n}                 \n
    • features: The features field is used by the EESSI test suite to run tests only on a partition if it supports a certain feature (for example if GPUs are available). Feature names are standardized in the EESSI test suite in the eessi.testsuite.constants.FEATURES dictionary. Typically, you want to define features: [FEATURES[CPU]] + list(SCALES.keys()) for CPU-based partitions, and features: [FEATURES[GPU]] + list(SCALES.keys()) for GPU-based partitions. The first tells the EESSI test suite that this partition can only run CPU-based tests, whereas the second indicates that this partition can only run GPU-based tests. You can define a single partition to have both the CPU and GPU features (since features is a Python list). However, since the CPU-based tests will not ask your batch scheduler for GPU resources, this may fail on batch systems that force you to ask for at least one GPU on GPU-based nodes. Also, running CPU-only code on a GPU node is typically considered bad practice, so testing its functionality is typically not relevant. The list(SCALES.keys()) adds all the scales that may be used by EESSI tests to the features list. These scales are defined in eessi.testsuite.constants.SCALES and define at which scales tests should be run, e.g. single core, half a node, a full node, two nodes, etc. This can be used to exclude running at certain scales on systems that would not support it. E.g. some systems might not support requesting multiple partial nodes, which is what the 1_cpn_2_nodes (1 core per node, on two nodes) and 1_cpn_4_nodes scales do. One could exclude these by setting e.g. features: [FEATURES[CPU]] + [s for s in SCALES if s not in ['1_cpn_2_nodes', '1_cpn_4_nodes']]. With this configuration setting, ReFrame will run all the scales listed in eessi.testsuite.constants.SCALES except those two. In a similar way, one could exclude all multinode tests if one just has a single node available.
    • devices: This field specifies information on devices (for example GPUs) present in the partition. Device types are standardized in the EESSI test suite in the eessi.testsuite.constants.DEVICE_TYPES dictionary. This is used by the EESSI test suite to determine how many of these devices it can/should use per node. Typically, there is no need to define devices for CPU partitions. For GPU partitions, you want to define something like:
      'devices': [\n    {\n        'type': DEVICE_TYPES[GPU],\n        'num_devices': 4,  # or however many GPUs you have per node\n    }\n],\n
    • extras: This field specifies extra information on the partition, such as the GPU vendor. Valid fields for extras are standardized as constants in eessi.testsuite.constants (for example GPU_VENDOR). This is used by the EESSI test suite to decide if a partition can run a test that specifically requires a certain brand of GPU. Typically, there is no need to define extras for CPU partitions. For GPU partitions, you typically want to specify the GPU vendor, for example:
      'extras': {\n    GPU_VENDOR: GPU_VENDORS[NVIDIA]\n}\n

    Note that as more tests are added to the EESSI test suite, the use of features, devices and extras by the EESSI test suite may be extended, which may require an update of your configuration file to define newly recognized fields.

    Note

    Keep in mind that ReFrame partitions are virtual entities: they may or may not correspond to a partition as it is configured in your batch system. One might for example have a single partition in the batch system, but configure it as two separate partitions in the ReFrame configuration file based on additional constraints that are passed to the scheduler, see for example the AWS CitC example configuration.

    The EESSI test suite (and more generally, ReFrame) assumes the hardware within a partition defined in the ReFrame configuration file is homogeneous.

    "},{"location":"test-suite/ReFrame-configuration-file/#environments","title":"Environments","text":"

    ReFrame needs a programming environment to be defined in its configuration file for tests that need to be compiled before they are run. While we don't have such tests in the EESSI test suite, ReFrame requires some programming environment to be defined:

    site_configuration = {\n    ...\n    'environments': [\n        {\n            'name': 'default',  # Note: needs to match whatever we set for 'environs' in the partition\n            'cc': 'cc',\n            'cxx': '',\n            'ftn': '',\n        }\n    ]\n}\n

    Note

    The name here needs to match whatever we specified for the environs property of the partitions.

    "},{"location":"test-suite/ReFrame-configuration-file/#logging","title":"Logging","text":"

    ReFrame allows a large degree of control over what gets logged, and where. For convenience, we have created a common logging configuration in eessi.testsuite.common_config that provides a reasonable default. It can be used by importing common_logging_config and calling it as a function to define the 'logging' setting:

    from eessi.testsuite.common_config import common_logging_config\n\nsite_configuration = {\n    ...\n    'logging':  common_logging_config(),\n}\n
    When combined with setting the $RFM_PREFIX environment variable (which we recommend doing), the output, performance logs, and regular ReFrame logs will all end up in the directory specified by $RFM_PREFIX.

    Alternatively, a prefix can be passed as an argument like common_logging_config(prefix), which will control where the regular ReFrame log ends up. Note that the performance logs do not respect this prefix: they will still end up in the standard ReFrame prefix (by default the current directory, unless otherwise set with $RFM_PREFIX or --prefix).
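
    For example (the log directory path used here is just an illustrative assumption):

    site_configuration = {\n    ...\n    'logging': common_logging_config('/path/to/reframe_logs'),  # hypothetical prefix for the regular ReFrame log file\n}\n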

    "},{"location":"test-suite/ReFrame-configuration-file/#cpu-auto-detection","title":"Auto-detection of processor information","text":"

    You can let ReFrame auto-detect the processor information for your system.

    ReFrame will automatically use auto-detection when two conditions are met:

    1. The partitions section of your configuration file does not specify processor information for a particular partition (as per our recommendation in the previous section);
    2. The remote_detect option is enabled in the general part of the configuration, as follows:
      site_configuration = {\n    'systems': ...\n    'logging': ...\n    'general': [\n        {\n            'remote_detect': True,\n        }\n    ]\n}\n

    To trigger the auto-detection of processor information, it is sufficient to let ReFrame list the available tests:

    reframe --list\n

    ReFrame will store the processor information for your system in ~/.reframe/topology/<system>-<partition>/processor.json.
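
    To verify that auto-detection worked, you can inspect that file (substituting the <system> and <partition> names from your own configuration):

    cat ~/.reframe/topology/<system>-<partition>/processor.json\n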

    "},{"location":"test-suite/available-tests/","title":"Available tests","text":"

    The EESSI test suite currently includes tests for:

    • GROMACS
    • TensorFlow
    • OSU Micro-Benchmarks

    For a complete overview of all available tests in the EESSI test suite, see the eessi/testsuite/tests subdirectory in the EESSI/test-suite GitHub repository.

    "},{"location":"test-suite/available-tests/#gromacs","title":"GROMACS","text":"

    Several tests for GROMACS, a software package to perform molecular dynamics simulations, are included; they use the systems from the HECBioSim benchmark suite:

    • Crambin (20K atom system)
    • Glutamine-Binding-Protein (61K atom system)
    • hEGFRDimer (465K atom system)
    • hEGFRDimerSmallerPL (465K atom system, only 10k steps)
    • hEGFRDimerPair (1.4M atom system)
    • hEGFRtetramerPair (3M atom system)

    It is implemented in tests/apps/gromacs.py, on top of the GROMACS test that is included in the ReFrame test library hpctestlib.

    To run this GROMACS test with all HECBioSim systems, use:

    reframe --run --name GROMACS\n

    To run this GROMACS test only for a specific HECBioSim system, use for example:

    reframe --run --name 'GROMACS.*HECBioSim/hEGFRDimerPair'\n

    To run this GROMACS test with the smallest HECBioSim system (Crambin), you can use the CI tag:

    reframe --run --name GROMACS --tag CI\n
    "},{"location":"test-suite/available-tests/#tensorflow","title":"TensorFlow","text":"

    A test for TensorFlow, a machine learning framework, is included; it is based on the \"Multi-worker training with Keras\" TensorFlow tutorial.

    It is implemented in tests/apps/tensorflow/.

    To run this TensorFlow test, use:

    reframe --run --name TensorFlow\n

    Warning

    This test requires TensorFlow v2.11 or newer; using an older TensorFlow version will not work!

    "},{"location":"test-suite/available-tests/#osumicrobenchmarks","title":"OSU Micro-Benchmarks","text":"

    A test for the OSU Micro-Benchmarks, which provide a suite of MPI benchmarks, is included.

    It is implemented in tests/apps/osu.py.

    To run this OSU Micro-Benchmarks test, use:

    reframe --run --name OSU-Micro-Benchmarks\n

    Warning

    This test requires OSU Micro-Benchmarks v5.9 or newer; using an older version will not work!

    "},{"location":"test-suite/installation-configuration/","title":"Installing and configuring the EESSI test suite","text":"

    This page covers the requirements, installation and configuration of the EESSI test suite.

    "},{"location":"test-suite/installation-configuration/#requirements","title":"Requirements","text":"

    The EESSI test suite requires:

    • Python >= 3.6
    • ReFrame v4.3.3 (or newer)
    • ReFrame test library (hpctestlib)
    "},{"location":"test-suite/installation-configuration/#installing-reframe","title":"Installing Reframe","text":"

    General instructions for installing ReFrame are available in the ReFrame documentation. To check if ReFrame is available, run the reframe command:

    reframe --version\n
    (for more details on the ReFrame version requirement, click here)

    Two important bugs were resolved in ReFrame's CPU autodetect functionality in version 4.3.3.

    We strongly recommend you use ReFrame >= 4.3.3.

    If you are using an older version of ReFrame, you may encounter some issues:

    • ReFrame will try to use the parallel launcher command configured for each partition (e.g. mpirun) when doing the remote autodetect. If there is no system-version of mpirun available, that will fail (see ReFrame issue #2926).
    • CPU autodetection only worked when using a clone of the ReFrame repository, not when it was installed with pip or EasyBuild (as is also the case for the ReFrame shipped with EESSI) (see ReFrame issue #2914).
    "},{"location":"test-suite/installation-configuration/#installing-reframe-test-library-hpctestlib","title":"Installing ReFrame test library (hpctestlib)","text":"

    The EESSI test suite requires that the ReFrame test library (hpctestlib) is available, which is currently not included in a standard installation of ReFrame.

    We recommend installing ReFrame using EasyBuild (version 4.8.1, or newer), or using a ReFrame installation that is available in the EESSI repository (version 2023.06, or newer).

    For example (using EESSI):

    source /cvmfs/software.eessi.io/versions/2023.06/init/bash\nmodule load ReFrame/4.3.3\n

    To check whether the ReFrame test library is available, try importing a submodule of the hpctestlib Python package:

    python3 -c 'import hpctestlib.sciapps.gromacs'\n
    "},{"location":"test-suite/installation-configuration/#installation","title":"Installation","text":"

    To install the EESSI test suite, you can either use pip or clone the GitHub repository directly:

    "},{"location":"test-suite/installation-configuration/#pip-install","title":"Using pip","text":"
    pip install git+https://github.com/EESSI/test-suite.git\n
    "},{"location":"test-suite/installation-configuration/#cloning-the-repository","title":"Cloning the repository","text":"
    git clone https://github.com/EESSI/test-suite $HOME/EESSI-test-suite\ncd EESSI-test-suite\nexport PYTHONPATH=$PWD:$PYTHONPATH\n
    "},{"location":"test-suite/installation-configuration/#verify-installation","title":"Verify installation","text":"

    To check whether the EESSI test suite installed correctly, try importing the eessi.testsuite Python package:

    python3 -c 'import eessi.testsuite'\n
    "},{"location":"test-suite/installation-configuration/#configuration","title":"Configuration","text":"

    Before you can run the EESSI test suite, you need to create a configuration file for ReFrame that is specific to the system on which the tests will be run.

    Example configuration files are available in the config subdirectory of the EESSI/test-suite GitHub repository (https://github.com/EESSI/test-suite/tree/main/config), which you can use as a template to create your own.

    "},{"location":"test-suite/installation-configuration/#configuring-reframe-environment-variables","title":"Configuring ReFrame environment variables","text":"

    We recommend setting a couple of $RFM_* environment variables to configure ReFrame, to avoid having to include particular options in the reframe command over and over again.

    "},{"location":"test-suite/installation-configuration/#RFM_CONFIG_FILES","title":"ReFrame configuration file ($RFM_CONFIG_FILES)","text":"

    (see also RFM_CONFIG_FILES in ReFrame docs)

    Define the $RFM_CONFIG_FILES environment variable to instruct ReFrame which configuration file to use, for example:

    export RFM_CONFIG_FILES=$HOME/EESSI-test-suite/config/example.py\n

    Alternatively, you can use the --config-file (or -C) reframe option.

    See the section on the ReFrame configuration file below for more information.

    "},{"location":"test-suite/installation-configuration/#search-path-for-tests-rfm_check_search_path","title":"Search path for tests ($RFM_CHECK_SEARCH_PATH)","text":"

    (see also RFM_CHECK_SEARCH_PATH in ReFrame docs)

    Define the $RFM_CHECK_SEARCH_PATH environment variable to tell ReFrame which directory to search for tests.

    In addition, define $RFM_CHECK_SEARCH_RECURSIVE to ensure that ReFrame searches $RFM_CHECK_SEARCH_PATH recursively (i.e. so that also tests in subdirectories are found).

    For example:

    export RFM_CHECK_SEARCH_PATH=$HOME/EESSI-test-suite/eessi/testsuite/tests\nexport RFM_CHECK_SEARCH_RECURSIVE=1\n

    Alternatively, you can use the --checkpath (or -c) and --recursive (or -R) reframe options.

    "},{"location":"test-suite/installation-configuration/#RFM_PREFIX","title":"ReFrame prefix ($RFM_PREFIX)","text":"

    (see also RFM_PREFIX in ReFrame docs)

    Define the $RFM_PREFIX environment variable to tell ReFrame where to store the files it produces. E.g.

    export RFM_PREFIX=$HOME/reframe_runs\n

    This involves:

    • test output directories (which contain e.g. the job script, stderr and stdout for each of the test jobs)
    • staging directories (unless otherwise specified by staging, see below);
    • performance logs;

    Note that by default, ReFrame uses the current directory as prefix. We recommend setting a prefix so that logs are not scattered around, but are instead collected in one location and appended to for each run.

    If our common logging configuration is used, the regular ReFrame log file will also end up in the location specified by $RFM_PREFIX.
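
    Putting the recommendations from this section together, a minimal setup (for example in your ~/.bashrc) might look like the following sketch; the paths shown are the assumptions used in the examples above, so adjust them to your own installation:

    export RFM_CONFIG_FILES=$HOME/EESSI-test-suite/config/example.py\nexport RFM_CHECK_SEARCH_PATH=$HOME/EESSI-test-suite/eessi/testsuite/tests\nexport RFM_CHECK_SEARCH_RECURSIVE=1\nexport RFM_PREFIX=$HOME/reframe_runs\n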

    Warning

    Using the --prefix option in your reframe command is not equivalent to setting $RFM_PREFIX, since our common logging configuration only picks up on the $RFM_PREFIX environment variable to determine the location for the ReFrame log file.

    "},{"location":"test-suite/release-notes/","title":"Release notes for EESSI test suite","text":""},{"location":"test-suite/release-notes/#020-7-march-2024","title":"0.2.0 (7 march 2024)","text":"

    This is a minor release of the EESSI test suite.

    It includes:

    • Implement the CI for regular runs on a system (#93)
    • Add OSU tests and update the hooks and configs to make the tests portable (#54, #95, #96, #97, #110, #116, #117, #118, #121)
    • Add extra scales to filter tests (#94)
    • add new hook to filter out invalid scales based on features in the config (#111)
    • unify test names (#108)
    • Updates to CI workflow (#102, #103, #104, #105)
    • Update common_config (#114)
    • Add common config item to redirect the report file to the same directory as e.g. the perflog (#122)
    • Fix code formatting + enforce it in CI workflow (#120)

    Bug fixes:

    • Fix hook _assign_num_tasks_per_node (#98)
    • fix import common-config vsc_hortense (#99)
    • fix typo in partition names in configuration file for vsc_hortense (#106)
    "},{"location":"test-suite/release-notes/#010-5-october-2023","title":"0.1.0 (5 October 2023)","text":"

    Version 0.1.0 is the first release of the EESSI test suite.

    It includes:

    • A well-structured eessi.testsuite Python package that provides constants, utilities, hooks, and tests, which can be installed with \"pip install\".
    • Tests for GROMACS and TensorFlow in eessi.testsuite.tests.apps that leverage the functionality provided by eessi.testsuite.*.
    • Examples of ReFrame configuration files for various systems in the config subdirectory.
    • A common_logging_config() function to facilitate the ReFrame logging configuration.
    • A set of standard device types and features that can be used in the partitions section of the ReFrame configuration file.
    • A set of tags (CI + scale) that can be used to filter checks.
    • Scripts that show how to run the test suite.
    "},{"location":"test-suite/usage/","title":"Using the EESSI test suite","text":"

    This page covers the usage of the EESSI test suite.

    We assume you have already installed and configured the EESSI test suite on your system.

    "},{"location":"test-suite/usage/#listing-available-tests","title":"Listing available tests","text":"

    To list the tests that are available in the EESSI test suite, use reframe --list (or reframe -L for short).

    If you have properly configured ReFrame, you should see a (potentially long) list of checks in the output:

    $ reframe --list\n...\n[List of matched checks]\n- ...\nFound 123 check(s)\n

    Note

    When using --list, checks are only generated based on modules that are available in the system where the reframe command is invoked.

    The system partitions specified in your ReFrame configuration file are not taken into account when using --list.

    So, if --list produces an overview of 50 checks, and you have 4 system partitions in your configuration file, actually running the test suite may result in (up to) 200 checks being executed.

    "},{"location":"test-suite/usage/#dry-run","title":"Performing a dry run","text":"

    To perform a dry run of the EESSI test suite, use reframe --dry-run:

    $ reframe --dry-run\n...\n[==========] Running 1234 check(s)\n\n[----------] start processing checks\n[ DRY      ] GROMACS_EESSI ...\n...\n[----------] all spawned checks have finished\n\n[  PASSED  ] Ran 1234/1234 test case(s) from 1234 check(s) (0 failure(s), 0 skipped, 0 aborted)\n

    Note

    When using --dry-run, the system partitions listed in your ReFrame configuration file are also taken into account when generating checks, in addition to available modules and test parameters, which is not the case when using --list.

    "},{"location":"test-suite/usage/#running-the-full-test-suite","title":"Running the (full) test suite","text":"

    To actually run the (full) EESSI test suite and let ReFrame produce a performance report, use reframe --run --performance-report.

    We strongly recommend filtering the checks that will be run by using additional options like --system, --name, --tag (see the 'Filtering tests' section below), and doing a dry run first to make sure that the generated checks correspond to what you have in mind.

    "},{"location":"test-suite/usage/#reframe-output-and-log-files","title":"ReFrame output and log files","text":"

    ReFrame will generate various output and log files:

    • a general ReFrame log file with debug logging on the ReFrame run (incl. selection of tests, generating checks, test results, etc.);
    • stage directories for each generated check, in which the checks are run;
    • output directories for each generated check, which include the test output;
    • performance log files for each test, which include performance results for the test runs;

    We strongly recommend controlling where these files go by using the common logging configuration that is provided by the EESSI test suite in your ReFrame configuration file and setting $RFM_PREFIX (avoid using the cmd line option --prefix).

    If you do, and if you use ReFrame v4.3.3 or newer, you should find the output and log files at:

    • general ReFrame log file at $RFM_PREFIX/logs/reframe_<datestamp>_<timestamp>.log;
    • stage directories in $RFM_PREFIX/stage/<system>/<partition>/<environment>/;
    • output directories in $RFM_PREFIX/output/<system>/<partition>/<environment>/;
    • performance log files in $RFM_PREFIX/perflogs/<system>/<partition>/<environment>/;

    In the stage and output directories, there will be a subdirectory for each check that was run, which are tagged with a unique hash (like d3adb33f) that is determined based on the specific parameters for that check (see the ReFrame documentation for more details on the test naming scheme).

    "},{"location":"test-suite/usage/#filtering-tests","title":"Filtering tests","text":"

    By default, ReFrame will automatically generate checks for each system partition, based on the tests available in the EESSI test suite, available software modules, and tags defined in the EESSI test suite.

    To avoid being overwhelmed by checks, it is recommended to apply filters so that ReFrame only generates the checks you are interested in.

    "},{"location":"test-suite/usage/#filter-name","title":"Filtering by test name","text":"

    You can filter checks based on the full test name using the --name option (or -n); the full test name includes the values of all test parameters.

    Here's an example of a full test name:

    GROMACS_EESSI %benchmark_info=HECBioSim/Crambin %nb_impl=cpu %scale=1_node %module_name=GROMACS/2023.1-foss-2022a /d3adb33f @example:gpu+default\n

    To let ReFrame only generate checks for GROMACS, you can use:

    reframe --name GROMACS\n

    To only run GROMACS checks with a particular version of GROMACS, you can use --name to only retain specific GROMACS modules:

    reframe --name %module_name=GROMACS/2023.1\n

    Likewise, you can filter on any part of the test name.

    You can also select one specific check using the corresponding test hash, which is also part of the full test name (see /d3adb33f in the example above), for example:

    reframe --name /d3adb33f\n

    The argument passed to --name is interpreted as a Python regular expression, so you can use wildcards like .*, character ranges like [0-9], use ^ to specify that the pattern should match from the start of the test name, etc.
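
    For example, the following command (a sketch based on the full test name shown above) uses the ^ anchor and the .* wildcard to only retain GROMACS checks that use the CPU implementation of the non-bonded interactions:

    reframe --list --name '^GROMACS.*%nb_impl=cpu'\n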

    Use --list or --dry-run to check the impact of using the --name option.

    "},{"location":"test-suite/usage/#filter-system-partition","title":"Filtering by system (partition)","text":"

    By default, ReFrame will generate checks for each system partition that is listed in your configuration file.

    To let ReFrame only generate checks for a particular system or system partition, you can use the --system option.

    For example:

    • To let ReFrame only generate checks for the system named example, use:
      reframe --system example ...\n
    • To let ReFrame only generate checks for the gpu partition of the system named example, use:
      reframe --system example:gpu ...\n

    Use --dry-run to check the impact of using the --system option.

    "},{"location":"test-suite/usage/#filter-tag","title":"Filtering by tags","text":"

    To filter tests using one or more tags, you can use the --tag option.

    Using --list-tags you can get a list of known tags.

    To check the impact of this on generated checks by ReFrame, use --list or --dry-run.

    "},{"location":"test-suite/usage/#ci-tag","title":"CI tag","text":"

    For each software that is included in the EESSI test suite, a small test is tagged with CI to indicate it can be used in a Continuous Integration (CI) environment.

    Hence, you can use this tag to let ReFrame only generate checks for small test cases:

    reframe --tag CI\n

    For example:

    $ reframe --name GROMACS --tag CI\n...\n
    "},{"location":"test-suite/usage/#scale-tags","title":"scale tags","text":"

    The EESSI test suite defines a set of custom tags that control the scale of checks, i.e. how many cores/GPUs/nodes should be used for running a check. The number of cores and GPUs serves as an upper limit; the actual count depends on the specific configuration of cores, GPUs, and sockets within the node, as well as the specific test being carried out.

    • 1_core: using 1 CPU core and 1 GPU
    • 2_cores: using 2 CPU cores and 1 GPU
    • 4_cores: using 4 CPU cores and 1 GPU
    • 1_cpn_2_nodes: using 1 CPU core per node, 1 GPU per node, and 2 nodes
    • 1_cpn_4_nodes: using 1 CPU core per node, 1 GPU per node, and 4 nodes
    • 1_8_node: using 1/8th of a node (12.5% of available cores/GPUs, 1 at minimum)
    • 1_4_node: using a quarter of a node (25% of available cores/GPUs, 1 at minimum)
    • 1_2_node: using half of a node (50% of available cores/GPUs, 1 at minimum)
    • 1_node: using a full node (all available cores/GPUs)
    • 2_nodes: using 2 full nodes
    • 4_nodes: using 4 full nodes
    • 8_nodes: using 8 full nodes
    • 16_nodes: using 16 full nodes
    "},{"location":"test-suite/usage/#using-multiple-tags","title":"Using multiple tags","text":"

    To filter tests using multiple tags, you can:

    • use | as separator to indicate that one of the specified tags must match (logical OR, for example --tag='1_core|2_cores');
    • use the --tag option multiple times to indicate that all specified tags must match (logical AND, for example --tag CI --tag 1_core);
    "},{"location":"test-suite/usage/#example-commands","title":"Example commands","text":"

    Running all GROMACS tests on 4 cores on the cpu partition

    reframe --run --system example:cpu --name GROMACS --tag 4_cores --performance-report\n

    List all checks for TensorFlow 2.11 using a single node

    reframe --list --name %module_name=TensorFlow/2.11 --tag 1_node\n

    Dry run of TensorFlow CI checks on a quarter (1/4) of a node (on all system partitions)

    reframe --dry-run --name 'TensorFlow.*CUDA' --tag 1_4_node --tag CI\n
    "},{"location":"test-suite/usage/#overriding-test-parameters-advanced","title":"Overriding test parameters (advanced)","text":"

    You can override test parameters using the --setvar option (or -S).

    This can be done either globally (for all tests), or only for specific tests (which is recommended when using --setvar).

    For example, to run all GROMACS checks with a specific GROMACS module, you can use:

    reframe --setvar GROMACS_EESSI.modules=GROMACS/2023.1-foss-2022a ...\n

    Warning

    We do not recommend using --setvar, since it is quite easy to make unintended changes to test parameters this way that can result in broken checks.

    You should try filtering tests using the --name or --tag options instead.

    "},{"location":"using_eessi/basic_commands/","title":"Basic commands","text":""},{"location":"using_eessi/basic_commands/#basic-commands-to-access-software-provided-via-eessi","title":"Basic commands to access software provided via EESSI","text":"

    EESSI provides software through environment module files and Lmod.

    To see which modules (and extensions) are available, run:

    module avail\n

    Below is a short excerpt of the output produced by module avail, showing 10 modules only.

       PyYAML/5.3-GCCcore-9.3.0\n   Qt5/5.14.1-GCCcore-9.3.0\n   Qt5/5.15.2-GCCcore-10.3.0                               (D)\n   QuantumESPRESSO/6.6-foss-2020a\n   R-bundle-Bioconductor/3.11-foss-2020a-R-4.0.0\n   R/4.0.0-foss-2020a\n   R/4.1.0-foss-2021a                                      (D)\n   re2c/1.3-GCCcore-9.3.0\n   re2c/2.1.1-GCCcore-10.3.0                               (D)\n   RStudio-Server/1.3.1093-foss-2020a-Java-11-R-4.0.0\n

    Load modules with module load package/version, e.g., module load R/4.1.0-foss-2021a, and try out the software. See below for a short example session:

    [EESSI 2023.06] $ module load R/4.1.0-foss-2021a\n[EESSI 2021.06] $ which R\n/cvmfs/software.eessi.io/versions/2021.12/software/linux/x86_64/intel/skylake_avx512/software/R/4.1.0-foss-2021a/bin/R\n[EESSI 2023.06] $ R --version\nR version 4.1.0 (2021-05-18) -- \"Camp Pontanezen\"\nCopyright (C) 2021 The R Foundation for Statistical Computing\nPlatform: x86_64-pc-linux-gnu (64-bit)\n\nR is free software and comes with ABSOLUTELY NO WARRANTY.\nYou are welcome to redistribute it under the terms of the\nGNU General Public License versions 2 or 3.\nFor more information about these matters see\nhttps://www.gnu.org/licenses/.\n
    "},{"location":"using_eessi/building_on_eessi/","title":"Building software on top of EESSI","text":""},{"location":"using_eessi/building_on_eessi/#building-software-on-top-of-eessi-with-easybuild","title":"Building software on top of EESSI with EasyBuild","text":"

    Building on top of EESSI with EasyBuild is relatively straightforward. One crucial feature is that EasyBuild supports building against operating system libraries that are not in a standard prefix (such as /usr/lib). This is required when building against EESSI, since all of the software in EESSI is built against the compatibility layer.

    "},{"location":"using_eessi/building_on_eessi/#starting-the-eessi-software-environment","title":"Starting the EESSI software environment","text":"

    Start your environment as described here

    "},{"location":"using_eessi/building_on_eessi/#configure-easybuild","title":"Configure EasyBuild","text":"

    To configure EasyBuild, first, check out the EESSI software-layer repository. We advise you to check out the branch corresponding to the version of EESSI you would like to use.

    If you are unsure which version you are using, you can run

    echo ${EESSI_VERSION}\n
    to check it.

    To build on top of e.g. version 2023.06 of the EESSI software stack, we check it out, and go into that directory:

    git clone https://github.com/EESSI/software-layer/ --branch 2023.06\ncd software-layer\n
    Then, you have to pick a working directory (that you have write access to) where EasyBuild can do the build, and an install directory (with sufficient storage space), where EasyBuild can install it. In this example, we create a temporary directory in /tmp/ as our working directory, and use $HOME/.local/easybuild as our installpath:
    export WORKDIR=$(mktemp --directory --tmpdir=/tmp  -t eessi-build.XXXXXXXXXX)\nsource configure_easybuild\nexport EASYBUILD_INSTALLPATH=\"${HOME}/.local/easybuild\"\n
    Next, you load the EasyBuild module that you want to use, e.g.
    module load EasyBuild/4.8.2\n
    Finally, you can check the current configuration for EasyBuild using
    eb --show-config\n

    Note

    We use EasyBuild's default behaviour of optimizing for the host architecture. Since the EESSI initialization script also loads the EESSI stack that is optimized for your host architecture, this matches nicely. However, if you work on a cluster with heterogeneous node types, you have to realize that you can only use these builds on the same architecture as the one you built them on. You can use different EASYBUILD_INSTALLPATHs if you want to build for different host architectures. For example, when you are on a system that has a mix of AMD zen3 and AMD zen4 nodes, you might want to use EASYBUILD_INSTALLPATH=$HOME/.local/easybuild/zen3 when building on a zen3 node, and EASYBUILD_INSTALLPATH=$HOME/.local/easybuild/zen4 when building on a zen4 node. Then, in the step below, instead of the module use command listed there, you can use module use $HOME/.local/easybuild/zen3/modules/all when you want to run on a zen3 node and module use $HOME/.local/easybuild/zen4/modules/all when you want to run on a zen4 node.
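
    As a sketch of the approach described in this note, and assuming that the EESSI initialization script has set $EESSI_SOFTWARE_SUBDIR (e.g. x86_64/amd/zen3) for the node you are working on, you could use something like:

    export EASYBUILD_INSTALLPATH=\"${HOME}/.local/easybuild/${EESSI_SOFTWARE_SUBDIR}\"\nmodule use \"${EASYBUILD_INSTALLPATH}/modules/all\"\n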

    "},{"location":"using_eessi/building_on_eessi/#building","title":"Building","text":"

    Now, you are ready to build. For example, at the time of writing, netCDF-4.9.0-gompi-2022a.eb was not in the EESSI environment yet, so you can build it yourself:

    eb netCDF-4.9.0-gompi-2022a.eb\n

    Note

    If this netCDF module is available by the time you are trying this, you can force a local rebuild by adding the --rebuild argument in order to experiment with building locally, or pick a different easyconfig to build.

    "},{"location":"using_eessi/building_on_eessi/#using-the-newly-built-module","title":"Using the newly built module","text":"

    First, you'll need to add the subdirectory of the EASYBUILD_INSTALLPATH that contains the modules to the MODULEPATH. You can do that using:

    module use ${EASYBUILD_INSTALLPATH}/modules/all\n

    You may want to do this as part of your .bashrc.

    Note

    Be careful adding to the MODULEPATH in your .bashrc if you are on a cluster with heterogeneous architectures. You don't want to accidentally pick up a module that was not compiled for the correct architecture.

    Since your module is built on top of the EESSI environment, that environment needs to be loaded first (as described here), if you haven't already done so.

    Finally, you should be able to load your newly built module:

    module load netCDF/4.9.0-gompi-2022a\n

    "},{"location":"using_eessi/building_on_eessi/#manually-building-software-op-top-of-eessi","title":"Manually building software op top of EESSI","text":"

    Building software on top of EESSI requires your linker to use the same system dependencies as the software in EESSI does. In other words: it requires you to link against libraries from the compatibility layer, instead of from your host OS.

    While we plan to support this in the future, manually building on top of EESSI is currently not supported in a trivial way.

    "},{"location":"using_eessi/eessi_demos/","title":"Running EESSI demos","text":"

    To really experience how using EESSI can significantly facilitate the work of researchers, we recommend running one or more of the EESSI demos.

    First, clone the eessi-demo Git repository, and move into the resulting directory:

    git clone https://github.com/EESSI/eessi-demo.git\ncd eessi-demo\n

    The contents of the directory should be something like this:

    $ ls -l\ntotal 48\ndrwxrwxr-x 2 example users  4096 May 15 13:26 Bioconductor\ndrwxrwxr-x 2 example users  4096 May 15 13:26 ESPResSo\ndrwxrwxr-x 2 example users  4096 May 15 13:26 GROMACS\n-rw-rw-r-- 1 example users 18092 Dec  5  2022 LICENSE\ndrwxrwxr-x 2 example users  4096 May 15 13:26 OpenFOAM\n-rw-rw-r-- 1 example users   543 May 15 13:26 README.md\ndrwxrwxr-x 3 example users  4096 May 15 13:26 scripts\ndrwxrwxr-x 2 example users  4096 May 15 13:26 TensorFlow\n

    The directories we care about are those that correspond to particular scientific software, like Bioconductor, GROMACS, OpenFOAM, TensorFlow, ...

    Each of these contains a run.sh script that can be used to start a small example run with that software. Every example takes only a couple of minutes to run, even with limited resources.

    "},{"location":"using_eessi/eessi_demos/#example-running-tensorflow","title":"Example: running TensorFlow","text":"

    Let's try running the TensorFlow example.

    First, we need to make sure that our environment is set up to use EESSI:

    source /cvmfs/software.eessi.io/versions/2023.06/init/bash\n

    Change to the TensorFlow subdirectory of the eessi-demo Git repository, and execute the run.sh script:

    [EESSI 2023.06] $ cd TensorFlow\n[EESSI 2023.06] $ ./run.sh\n

    Shortly after starting the script you should see output as shown below, which indicates that TensorFlow has started running:

    Epoch 1/5\n   1875/1875 [==============================] - 3s 1ms/step - loss: 0.2983 - accuracy: 0.9140\nEpoch 2/5\n   1875/1875 [==============================] - 3s 1ms/step - loss: 0.1444 - accuracy: 0.9563\nEpoch 3/5\n   1875/1875 [==============================] - 3s 1ms/step - loss: 0.1078 - accuracy: 0.9670\nEpoch 4/5\n   1875/1875 [==============================] - 3s 1ms/step - loss: 0.0890 - accuracy: 0.9717\nEpoch 5/5\n   1875/1875 [==============================] - 3s 1ms/step - loss: 0.0732 - accuracy: 0.9772\n313/313 - 0s - loss: 0.0679 - accuracy: 0.9790 - 391ms/epoch - 1ms/step\n\nreal   1m24.645s\nuser   0m16.467s\nsys    0m0.910s\n
    "},{"location":"using_eessi/setting_up_environment/","title":"Setting up your environment","text":"

    To set up the EESSI environment, simply run the command:

    source /cvmfs/software.eessi.io/versions/2023.06/init/bash\n

    This may take a while, as data is downloaded from a Stratum 1 server that is part of the CernVM-FS infrastructure used to distribute files. You should see output similar to the following:

    Found EESSI repo @ /cvmfs/software.eessi.io/versions/2023.06!\narchdetect says x86_64/amd/zen2\nUsing x86_64/amd/zen2 as software subdirectory.\nUsing /cvmfs/software.eessi.io/versions/2023.06/software/linux/x86_64/amd/zen2/modules/all as the directory to be added to MODULEPATH.\nFound Lmod configuration file at /cvmfs/software.eessi.io/versions/2023.06/software/linux/x86_64/amd/zen2/.lmod/lmodrc.lua\nInitializing Lmod...\nPrepending /cvmfs/software.eessi.io/versions/2023.06/software/linux/x86_64/amd/zen2/modules/all to $MODULEPATH...\nEnvironment set up to use EESSI (2023.06), have fun!\n{EESSI 2023.06} [user@system ~]$ # (2)!\n
    1. What is reported here depends on the CPU architecture of the machine on which you are running the source command.
    2. This is the prompt indicating that you have access to the EESSI software stack.

    The last line is the shell prompt.

    Your environment is now set up, you are ready to start running software provided by EESSI!

    "},{"location":"blog/archive/2024/","title":"2024","text":""}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Welcome to the EESSI project documentation!","text":"

    Quote

    What if there was a way to avoid having to install a broad range of scientific software from scratch on every HPC cluster or cloud instance you use or maintain, without compromising on performance?

    The European Environment for Scientific Software Installations (EESSI, pronounced as \"easy\") is a collaboration between different European partners in the HPC community. The goal of this project is to build a common stack of scientific software installations for HPC systems and beyond, including laptops, personal workstations and cloud infrastructure.

    "},{"location":"#quick-links","title":"Quick links","text":"
    • What is EESSI?
    • Contact info

    For users:

    • software.eessi.io repository
    • Access, initialize and use EESSI
    • How to run EESSI test suite
    • Get help or report issue

    For system administrators:

    • EESSI layered structure: filesystem, compatibility, software
    • Installing EESSI
    • Setting up a mirror server

    For contributors:

    • Adding software to EESSI
    • Meetings

    The EESSI project was covered during a quick AWS HPC Tech Short video (15 June 2023):

    "},{"location":"bot/","title":"Build-test-deploy bot","text":"

    Building, testing, and deploying software is done by one or more bot instances.

    The EESSI build-test-deploy bot is implemented as a GitHub App in the eessi-bot-software-layer repository.

    It operates in the context of pull requests to the compatibility-layer repository or the software-layer repository, and follows the instructions supplied by humans, so the procedure of adding software to EESSI is semi-automatic.

    It leverages the scripts provided in the bot/ subdirectory of the target repository (see for example here), like bot/build.sh to build software, and bot/check-result.sh to check whether the software was built correctly.

    "},{"location":"bot/#high-level-design","title":"High-level design","text":"

    The bot consists of two components: the event handler, and the job manager.

    "},{"location":"bot/#event-handler","title":"Event handler","text":"

    The bot event handler is responsible for handling GitHub events for the GitHub repositories it is registered to.

    It is triggered for every event that it receives from GitHub. Most events are ignored, but specific events trigger the bot to take action.

    Examples of actionable events are submitting of a comment that starts with bot:, which may specify an instruction for the bot like building software, or adding a bot:deploy label (see deploying).

    "},{"location":"bot/#job-manager","title":"Job manager","text":"

    The bot job manager is responsible for monitoring the queued and running jobs, and reporting back when jobs completed.

    It runs every couple of minutes as a cron job.

    "},{"location":"bot/#basics","title":"Basics","text":"

    Instructions for the bot should always start with bot:.

    To get help from the bot, post a comment with bot: help.

    To make the bot report how it is configured, post a comment with bot: show_config.

    "},{"location":"bot/#permissions","title":"Permissions","text":"

    The bot is configured to only act on instructions issued by specific GitHub accounts.

    There are separate configuration options that control who is allowed to send instructions to the bot, to trigger building of software, and to deploy software installations into the EESSI repository.

    Note

    Ask for help in the #software-layer-bot channel of the EESSI Slack if needed!

    "},{"location":"bot/#building","title":"Building","text":"

    To instruct the bot to build software, one or more build instructions should be issued by posting a comment in the pull request (see also here).

    The most basic build instruction that can be sent to the bot is:

    bot: build\n

    Warning

    Only use bot: build if you are confident that it is OK to do so.

    Most likely, you want to supply one or more filters to avoid having the bot build for all of its configurations.

    "},{"location":"bot/#filters","title":"Filters","text":"

    Build instructions can include filters that are applied by each bot instance to determine which builds should be executed, based on:

    • instance: the name of the bot instance, for example instance:aws for the bot instance running in AWS;
    • repository: the target repository, for example eessi-2023.06-software which corresponds to the 2023.06 version of the EESSI software layer;
    • architecture: the name of the CPU microarchitecture, for example x86_64/amd/zen2;

    Note

    Use : as separator to specify a value for a particular filter, do not add spaces after the :.

    The bot recognizes shorthands for the supported filters, so you can use inst:... instead of instance:..., repo:... instead of repository:..., and arch:... instead of architecture:....

    "},{"location":"bot/#combining-filters","title":"Combining filters","text":"

    You can combine multiple filters in a single build instruction. Separate filters with a space, order of filters does not matter.

    For example:

    bot: build repo:eessi-hpc.org-2023.06-software arch:x86_64/amd/zen2\n
    "},{"location":"bot/#multiple-build-instructions","title":"Multiple build instructions","text":"

    You can issue multiple build instructions in a single comment, even across multiple bot instances, repositories, and CPU targets. Specify one build instruction per line.

    For example:

    bot: build repo:eessi-hpc.org-2023.06-software arch:x86_64/amd/zen3 inst:aws\nbot: build repo:eessi-hpc.org-2023.06-software arch:aarch64/generic inst:azure\n

    Note

    The bot applies the filters with partial matching, which you can use to combine multiple build instructions into a single one.

    For example, if you only want to build for all aarch64 CPU targets, you can use arch:aarch64 as filter.

    The same applies to the instance and repository filters.
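
    For example, a single instruction like the one below (reusing the repository name from the examples above) would trigger builds for all aarch64 CPU targets supported by the bot instances:

    bot: build repo:eessi-hpc.org-2023.06-software arch:aarch64\n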

    "},{"location":"bot/#behind-the-scenes","title":"Behind-the-scenes","text":""},{"location":"bot/#processing-build-instructions","title":"Processing build instructions","text":"

    When the bot receives build instructions through a comment in a pull request, they are processed by the event handler component. It will:

    1) Combine its active configuration (instance name, repositories, supported CPU targets) and the build instructions to prepare a list of jobs to submit;

    2) Create a working directory for each job, including a Slurm job script that runs the bot/build.sh script in the context of the changes proposed in the pull request to build the software, and runs the bot/check-result.sh script at the end to check whether the build was successful;

    3) Submit each prepared job to a worker node that can build for the specified CPU target, and put a hold on it.

    "},{"location":"bot/#managing-build-jobs","title":"Managing build jobs","text":"

    During the next iteration of the job manager, the submitted jobs are released and queued for execution.

    The job manager also monitors the running jobs at regular intervals, and reports back in the pull request when a job has completed. It also reports the result (SUCCESS or FAILURE), based on the result of the bot/check-result.sh script.

    "},{"location":"bot/#artefacts","title":"Artefacts","text":"

    If all goes well, each job should produce a tarball as an artefact, which contains the software installations and the corresponding environment module files.

    The message reported by the job manager provides an overview of the contents of the artefact, which was created by the bot/check-result.sh script.

    "},{"location":"bot/#testing","title":"Testing","text":"

    Warning

    The test phase is not implemented yet in the bot.

    We intend to use the EESSI test suite in different OS configurations to verify that the software that was built works as expected.

    "},{"location":"bot/#deploying","title":"Deploying","text":"

    To deploy the artefacts that were obtained in the build phase, you should add the bot: deploy label to the pull request.

    This will trigger the event handler to upload the artefacts for ingestion into the EESSI repository.

    "},{"location":"bot/#behind-the-scenes_1","title":"Behind-the-scenes","text":"

    The current setup for the software-layer repository, is as follows:

    • The bot deploys the artefacts (tarballs) to an S3 bucket in AWS, along with a metadata file, using the eessi-upload-to-staging script;
    • A cron job that runs every couple of minutes on the CernVM-FS Stratum-0 server opens a pull request to the (private) EESSI/staging repository, to move the metadata file for each uploaded tarball from the staged to the approved directory;
    • Once that pull request gets merged, the corresponding tarball is automatically ingested into the EESSI repository by a cron job on the Stratum-0 server, and the metadata file is moved from approved to ingested in the EESSI/staging repository;
    "},{"location":"compatibility_layer/","title":"Compatibility layer","text":"

    The middle layer of the EESSI project is the compatibility layer, which ensures that our scientific software stack is compatible with different client operating systems (different Linux distributions, macOS and even Windows via WSL).

    For this we rely on Gentoo Prefix, by installing a limited set of Gentoo Linux packages in a non-standard location (a \"prefix\"), using Gentoo's package manager Portage.

    The compatibility layer is maintained via our https://github.com/EESSI/compatibility-layer GitHub repository.

    "},{"location":"contact/","title":"Contact info","text":"

    For more information:

    • Visit our website
    • Consult our documentation
    • Ask for help at our support portal
    • Join our Slack channel
    • Reach out to one of the project partners
    • Check out our GitHub repositories
    • Follow us on Twitter

    "},{"location":"filesystem_layer/","title":"Filesystem layer","text":""},{"location":"filesystem_layer/#cernvm-file-system-cernvm-fs","title":"CernVM File System (CernVM-FS)","text":"

    The bottom layer of the EESSI project is the filesystem layer, which is responsible for distributing the software stack.

    For this we rely on CernVM-FS (or CVMFS for short), a network file system used to distribute the software to the clients in a fast, reliable and scalable way.

    CVMFS was created over 10 years ago specifically for the purpose of globally distributing a large software stack. For the experiments at the Large Hadron Collider, it hosts several hundred million files and directories that are distributed to on the order of a hundred thousand client computers.

    The hierarchical structure with multiple caching layers (Stratum-0, Stratum-1's located at partner sites and local caching proxies) ensures good performance with limited resources. Redundancy is provided by using multiple Stratum-1's at various sites. Since CVMFS is based on the HTTP protocol, the ubiquitous Squid caching proxy can be leveraged to reduce server loads and improve performance at large installations (such as HPC clusters). Clients can easily mount the file system (read-only) via a FUSE (Filesystem in Userspace) module.

    For a (basic) introduction to CernVM-FS, see this presentation.

    Detailed information about how we configure CVMFS is available at https://github.com/EESSI/filesystem-layer.

    "},{"location":"filesystem_layer/#eessi-infrastructure","title":"EESSI infrastructure","text":"

    For both the pilot and production repositories, EESSI hosts a CernVM-FS Stratum 0 and a number of public Stratum 1 servers. Client systems using EESSI by default connect against the public EESSI CernVM-FS Stratum 1 servers. The status of the infrastructure for the pilot repository is displayed at http://status.eessi-infra.org, while for the production repository it is displayed at https://status.eessi.io.

    "},{"location":"gpu/","title":"GPU support","text":"

    More information on the actions that must be performed to ensure that GPU software included in EESSI can use the GPU in your system is available below.

    Please open a support issue if you need help or have questions regarding GPU support.

    Make sure the ${EESSI_VERSION} version placeholder is defined!

    In this page, we use ${EESSI_VERSION} as a placeholder for the version of the EESSI repository, for example:

    /cvmfs/software.eessi.io/versions/${EESSI_VERSION}\n

    Before inspecting paths, or executing any of the specified commands, you should define $EESSI_VERSION first, for example with:

    export EESSI_VERSION=2023.06\n

    "},{"location":"gpu/#nvidia","title":"Support for using NVIDIA GPUs","text":"

    EESSI supports running CUDA-enabled software. All CUDA-enabled modules are marked with the (gpu) feature, which is visible in the output produced by module avail.

    "},{"location":"gpu/#nvidia_drivers","title":"NVIDIA GPU drivers","text":"

    For CUDA-enabled software to run, it needs to be able to find the NVIDIA GPU drivers of the host system. The challenge here is that the NVIDIA GPU drivers are not always in a standard system location, and that we can not install the GPU drivers in EESSI (since they are too closely tied to the client OS and GPU hardware).

    "},{"location":"gpu/#cuda_sdk","title":"Compiling CUDA software","text":"

    An additional requirement is necessary if you want to be able to compile CUDA-enabled software using a CUDA installation included in EESSI. This requires a full CUDA SDK, but the CUDA SDK End User License Agreement (EULA) does not allow for full redistribution. In EESSI, we are (currently) only allowed to redistribute the files needed to run CUDA software.

    Full CUDA SDK only needed to compile CUDA software

    Without a full CUDA SDK on the host system, you will still be able to run CUDA-enabled software from the EESSI stack, you just won't be able to compile additional CUDA software.

    Below, we describe how to make sure that the EESSI software stack can find your NVIDIA GPU drivers and (optionally) full installations of the CUDA SDK.

    "},{"location":"gpu/#host_injections","title":"host_injections variant symlink","text":"

    In the EESSI repository, a special directory has been prepared where system administrators can install files that can be picked up by software installations included in EESSI. This gives administrators the ability to influence the behaviour (and capabilities) of the EESSI software stack.

    This special directory is located in /cvmfs/software.eessi.io/host_injections, and it is a CernVM-FS Variant Symlink: a symbolic link for which the target can be controlled by the CernVM-FS client configuration (for more info, see 'Variant Symlinks' in the official CernVM-FS documentation).

    Default target for host_injections variant symlink

    Unless otherwise configured in the CernVM-FS client configuration for the EESSI repository, the host_injections symlink points to /opt/eessi on the client system:

    $ ls -l /cvmfs/software.eessi.io/host_injections\nlrwxrwxrwx 1 cvmfs cvmfs 10 Oct  3 13:51 /cvmfs/software.eessi.io/host_injections -> /opt/eessi\n

    As an example, let's imagine that we want to use an architecture-specific location on a shared filesystem as the target for the symlink. This has the advantage that one can make changes under host_injections that affect all nodes which share that CernVM-FS configuration. Configuring this in your CernVM-FS configuration would mean adding the following line in the client configuration file:

    EESSI_HOST_INJECTIONS=/shared_fs/path\n

    Don't forget to reload the CernVM-FS configuration

    After making a change to a CernVM-FS configuration file, you also need to reload the configuration:

    sudo cvmfs_config reload\n

    All CUDA-enabled software in EESSI expects the CUDA drivers to be available in a specific subdirectory of this host_injections directory. In addition, installations of the CUDA SDK included in EESSI are stripped down to the files that we are allowed to redistribute; all other files are replaced by symbolic links that point to another specific subdirectory of host_injections. For example:

    $ ls -l /cvmfs/software.eessi.io/versions/2023.06/software/linux/x86_64/amd/zen3/software/CUDA/12.1.1/bin/nvcc\nlrwxrwxrwx 1 cvmfs cvmfs 109 Dec 21 14:49 /cvmfs/software.eessi.io/versions/2023.06/software/linux/x86_64/amd/zen3/software/CUDA/12.1.1/bin/nvcc -> /cvmfs/software.eessi.io/host_injections/2023.06/software/linux/x86_64/amd/zen3/software/CUDA/12.1.1/bin/nvcc\n

    If the corresponding full installation of the CUDA SDK is available there, the CUDA installation included in EESSI can be used to build CUDA software.

    "},{"location":"gpu/#nvidia_eessi_native","title":"Using NVIDIA GPUs via a native EESSI installation","text":"

    Here, we describe the steps to enable GPU support when you have a native EESSI installation on your system.

    Required permissions

    To enable GPU support for EESSI on your system, you will typically need to have system administration rights, since you need write permissions to the target directory of the host_injections symlink.

    "},{"location":"gpu/#exposing-nvidia-gpu-drivers","title":"Exposing NVIDIA GPU drivers","text":"

    To install the symlinks to your GPU drivers in host_injections, run the link_nvidia_host_libraries.sh script that is included in EESSI:

    /cvmfs/software.eessi.io/versions/${EESSI_VERSION}/scripts/gpu_support/nvidia/link_nvidia_host_libraries.sh\n

    This script uses ldconfig on your host system to locate your GPU drivers, and creates symbolic links to them in the correct location under host_injections directory. It also stores the CUDA version supported by the driver that the symlinks were created for.

    Re-run link_nvidia_host_libraries.sh after NVIDIA GPU driver update

    You should re-run this script every time you update the NVIDIA GPU drivers on the host system.

    Note that it is safe to re-run the script even if no driver updates were done: the script should detect that the current version of the drivers was already symlinked.

    "},{"location":"gpu/#installing-full-cuda-sdk-optional","title":"Installing full CUDA SDK (optional)","text":"

    To install a full CUDA SDK under host_injections, use the install_cuda_host_injections.sh script that is included in EESSI:

    /cvmfs/software.eessi.io/versions/${EESSI_VERSION}/scripts/gpu_support/nvidia/install_cuda_host_injections.sh\n

    For example, to install CUDA 12.1.1 in the directory that the host_injections variant symlink points to, using /tmp/$USER/EESSI as directory to store temporary files:

    /cvmfs/software.eessi.io/versions/${EESSI_VERSION}/scripts/gpu_support/nvidia/install_cuda_host_injections.sh --cuda-version 12.1.1 --temp-dir /tmp/$USER/EESSI --accept-cuda-eula\n
    You should choose the CUDA version you wish to install according to what CUDA versions are included in EESSI; see the output of module avail CUDA/ after setting up your environment for using EESSI.

    You can run /cvmfs/software.eessi.io/versions/${EESSI_VERSION}/scripts/gpu_support/nvidia/install_cuda_host_injections.sh --help to check all of the options.

    Tip

    This script uses EasyBuild to install the CUDA SDK. For this to work, two requirements need to be satisfied:

    • module load EasyBuild should work (or the eb command is already available in the environment);
    • The version of EasyBuild being used should provide the requested version of the CUDA easyconfig file (in the example case above, that's CUDA-12.1.1.eb).

    You can rely on the EasyBuild installation that is included in EESSI for this.

    Alternatively, you may load an EasyBuild module manually before running the install_cuda_host_injections.sh script to make an eb command available.

    "},{"location":"gpu/#nvidia_eessi_container","title":"Using NVIDIA GPUs via EESSI in a container","text":"

    We focus here on the Apptainer/Singularity use case, and have only tested the --nv option to enable access to GPUs from within the container.

    If you are using the EESSI container to access the EESSI software, the procedure for enabling GPU support is slightly different and will be documented here eventually.

    "},{"location":"gpu/#exposing-nvidia-gpu-drivers_1","title":"Exposing NVIDIA GPU drivers","text":"

    When running a container with apptainer or singularity, it is not necessary to run the link_nvidia_host_libraries.sh script, since both these tools use $LD_LIBRARY_PATH internally in order to make the host GPU drivers available in the container.

    The only scenario where this would be required is if $LD_LIBRARY_PATH is modified or undefined.

    "},{"location":"gpu/#gpu_cuda_testing","title":"Testing the GPU support","text":"

    The quickest way to test if software installations included in EESSI can access and use your GPU is to run the deviceQuery executable that is part of the CUDA-Samples module:

    module load CUDA-Samples\ndeviceQuery\n
    If both commands succeed, you should see information about your GPU printed to your terminal.

    "},{"location":"meetings/","title":"Meetings","text":""},{"location":"meetings/#monthly-meetings-online","title":"Monthly meetings (online)","text":"

    Online EESSI update meeting, every 1st Thursday of the month at 14:00 CE(S)T.

    More info can be found on the EESSI wiki.

    "},{"location":"meetings/#physical-meetings","title":"Physical meetings","text":"
    • EESSI Community Meeting in Amsterdam (NL), 14-16 Sept 2022
    "},{"location":"meetings/#physical-meetings-archive","title":"Physical meetings (archive)","text":""},{"location":"meetings/#2020","title":"2020","text":"
    • Meeting in Groningen (NL), 16 Jan 2020
    • Meeting in Delft (NL), 5 Mar 2020
    "},{"location":"meetings/#2019","title":"2019","text":"
    • Meeting in Cambridge (UK), 20-21 May 2019
    "},{"location":"overview/","title":"Overview of the EESSI project","text":""},{"location":"overview/#scope-goals","title":"Scope & Goals","text":"

    Through the EESSI project, we want to set up a shared stack of scientific software installations, and by doing so avoid a lot of duplicate work across HPC sites.

    For end users, we want to provide a uniform user experience with respect to available scientific software, regardless of which system they use.

    Our software stack should work on laptops, personal workstations, HPC clusters and in the cloud, which means we will need to support different CPUs, networks, GPUs, and so on. We hope to make this work for any Linux distribution and maybe even macOS and Windows via WSL, and a wide variety of CPU architectures (Intel, AMD, ARM, POWER, RISC-V).

    Of course we want to focus on the performance of the software, but also on automating the workflow for maintaining the software stack, thoroughly testing the installations, and collaborating efficiently.

    "},{"location":"overview/#inspiration","title":"Inspiration","text":"

    The EESSI concept is heavily inspired by the Compute Canada software stack, which is a shared software stack used on all 5 major national systems in Canada and a number of smaller ones.

    The design of the Compute Canada software stack is discussed in detail in the PEARC'19 paper \"Providing a Unified Software Environment for Canada's National Advanced Computing Centers\".

    It has also been presented at the 5th EasyBuild User Meeting (slides, recorded talk), and is well documented.

    "},{"location":"overview/#layered-structure","title":"Layered structure","text":"

    The EESSI project consists of 3 layers.

    The bottom layer is the filesystem layer, which is responsible for distributing the software stack across clients.

    The middle layer is a compatibility layer, which ensures that the software stack is compatible with multiple different client operating systems.

    The top layer is the software layer, which contains the actual scientific software applications and their dependencies.

    The host OS still provides a couple of things, like drivers for network and GPU, support for shared filesystems like GPFS and Lustre, a resource manager like Slurm, and so on.
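
    As a rough illustration of this layered structure, the compatibility layer and software layer are visible as separate directory trees under the CernVM-FS repository provided by the filesystem layer (a sketch, assuming EESSI version 2023.06; exact paths depend on your OS and CPU):

    # the compatibility layer and software layer live in separate subdirectories\nls /cvmfs/software.eessi.io/versions/2023.06\n# expected to include: compat  init  software\n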

    "},{"location":"overview/#opportunities","title":"Opportunities","text":"

    We hope to collaborate with interested parties across the HPC community, including HPC centres, vendors, consultancy companies and scientific software developers.

    Through our software stack, HPC users can seamlessly hop between sites, since the same software is available everywhere.

    We can leverage each other's work with respect to providing tested and properly optimized scientific software installations more efficiently, and provide a platform for easy benchmarking of new systems.

    By working together with the developers of scientific software we can provide vetted installations for the broad HPC community.

    "},{"location":"overview/#challenges","title":"Challenges","text":"

    There are many challenges in an ambitious project like this, including (but probably not limited to):

    • Finding time and manpower to get the software stack set up properly;
    • Leveraging system resources like the network interconnect (MPI & co), accelerators (GPUs), ...;
    • Supporting CPU architectures other than x86_64, including ARM, POWER, RISC-V, ...
    • Dealing with licensed software, like Intel tools, MATLAB, ANSYS, ...;
    • Integration with resource managers (Slurm) and vendor provided software (Cray PE);
    • Convincing HPC site admins to adopt EESSI;
    "},{"location":"overview/#current-status","title":"Current status","text":"

    (June 2020)

    We are actively working on the EESSI repository, and are organizing monthly meetings to discuss progress and next steps forward.

    Keep an eye on our GitHub repositories at https://github.com/EESSI and our Twitter feed.

    "},{"location":"partners/","title":"Project partners","text":""},{"location":"partners/#delft-university-of-technology-the-netherlands","title":"Delft University of Technology (The Netherlands)","text":"
    • Robbert Eggermont
    • Koen Mulderij
    "},{"location":"partners/#dell-technologies-europe","title":"Dell Technologies (Europe)","text":"
    • Walther Blom, High Education & Research
    • Jaco van Dijk, Higher Education
    "},{"location":"partners/#eindhoven-university-of-technology","title":"Eindhoven University of Technology","text":"
    • Alain van Hoof, HPC-Lab
    "},{"location":"partners/#ghent-university-belgium","title":"Ghent University (Belgium)","text":"
    • Kenneth Hoste, HPC-UGent
    "},{"location":"partners/#hpcnow-spain","title":"HPCNow! (Spain)","text":"
    • Oriol Mula Valls
    "},{"location":"partners/#julich-supercomputing-centre-germany","title":"J\u00fclich Supercomputing Centre (Germany)","text":"
    • Alan O'Cais
    "},{"location":"partners/#university-of-cambridge-united-kingdom","title":"University of Cambridge (United Kingdom)","text":"
    • Mark Sharpley, Research Computing Services Division
    "},{"location":"partners/#university-of-groningen-the-netherlands","title":"University of Groningen (The Netherlands)","text":"
    • Bob Dr\u00f6ge, Center for Information Technology
    • Henk-Jan Zilverberg, Center for Information Technology
    "},{"location":"partners/#university-of-twente-the-netherlands","title":"University of Twente (The Netherlands)","text":"
    • Geert Jan Laanstra, Electrical Engineering, Mathematics and Computer Science (EEMCS)
    "},{"location":"partners/#university-of-oslo-norway","title":"University of Oslo (Norway)","text":"
    • Terje Kvernes
    "},{"location":"partners/#university-of-bergen-norway","title":"University of Bergen (Norway)","text":"
    • Thomas R\u00f6blitz
    "},{"location":"partners/#vrije-universiteit-amsterdam-the-netherlands","title":"Vrije Universiteit Amsterdam (The Netherlands)","text":"
    • Peter Stol
    "},{"location":"partners/#surf-the-netherlands","title":"SURF (The Netherlands)","text":"
    • Caspar van Leeuwen
    • Marco Verdicchio
    • Bas van der Vlies
    "},{"location":"software_layer/","title":"Software layer","text":"

    The top layer of the EESSI project is the software layer, which provides the actual scientific software installations.

    To install the software we include in our stack, we use EasyBuild, a framework for installing scientific software on HPC systems. These installations are optimized for a particular system architecture (specific CPU and GPU generation).

    To access these software installations we provide environment module files and use Lmod, a modern environment modules tool that has been widely adopted in the HPC community in recent years.

    We leverage the archspec Python library to automatically select the best suited part of the software stack for a particular host, based on its system architecture.
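
    As an illustration of what archspec does, you can query the detected host microarchitecture yourself (a minimal sketch, assuming the archspec Python package is available in your environment):

    # prints the name of the detected CPU microarchitecture (e.g. neoverse_n1)\npython -c 'import archspec.cpu; print(archspec.cpu.host())'\n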

    The software layer is maintained through our https://github.com/EESSI/software-layer GitHub repository.

    "},{"location":"software_testing/","title":"Software testing","text":"

    This page has been replaced with the test-suite page; update your bookmarks!

    "},{"location":"support/","title":"Getting support for EESSI","text":"

    Thanks to the MultiXscale EuroHPC project we are able to provide support to the users of EESSI.

    The EESSI support portal is hosted in GitLab: https://gitlab.com/eessi/support.

    "},{"location":"support/#open-issue","title":"How to report a problem or ask a question","text":"

    We recommend using a GitLab account if you want to get help from the EESSI support team.

    If you have a GitLab account you can submit your problems or questions on EESSI via the issue tracker of the EESSI support portal at https://gitlab.com/eessi/support/-/issues. Please use one of the provided templates (report a problem, software request, question, ...) when creating an issue.

    You can also contact us via our e-mail address support (@) eessi.io, which will automatically create a (private) issue in the EESSI support portal. When you send us an email, please provide us with as much information as possible on your question or problem. You can find an overview of the information that we would like to receive in the README of the EESSI support portal.

    "},{"location":"support/#level-of-support","title":"Level of Support","text":"

    We provide support for EESSI according to a \"reasonable effort\" standard. That means we will put in a reasonable effort to help you, but we may not have the time to explore every potential cause, and it may not lead to a (quick) solution. You can compare this to the level of support you typically get from other active open source projects.

    Note that the more complete your reported issue is (e.g. description of the error, what you ran, the software environment in which you ran it, a minimal reproducer, etc.), the bigger the chance that we can help you with \"reasonable effort\".

    "},{"location":"support/#what-do-we-provide-support-for","title":"What do we provide support for","text":""},{"location":"support/#accessing-and-using-the-eessi-software-stack","title":"Accessing and using the EESSI software stack","text":"

    If you have trouble connecting to the software stack, such as trouble related to installing or configuring CernVM-FS to access the EESSI filesystem layer, or running the software installations included in the EESSI compatibility layer or software layer, please contact us.

    Note that we can only help with problems related to the software installations (getting the software to run, to perform as expected, etc.). We do not provide support for using specific features of the provided software, nor can we fix (known or unknown) bugs in the software included in EESSI. We can only help with diagnosing and fixing problems that are caused by how the software was built and installed in EESSI.

    "},{"location":"support/#software-requests","title":"Software requests","text":"

    We are open to software requests for software that is not included in EESSI yet.

    The quickest way to add additional software to EESSI is by contributing it yourself as a community contribution; please see the documentation on adding software.

    Alternatively, you can send in a request to our support team. Please try to provide as much information on the software as possible: preferably use the issue template (which requires you to log in to GitLab), or make sure to cover the items listed here.

    Be aware that we can only provide software that has an appropriate open source license.

    "},{"location":"support/#eessi-test-suite","title":"EESSI test suite","text":"

    If you are using the EESSI test suite, you can get help via the EESSI support portal.

    "},{"location":"support/#build-and-deploy-bot","title":"Build-and-deploy bot","text":"

    If you are using the EESSI build-and-deploy bot, you can get help via the EESSI support portal.

    "},{"location":"support/#what-do-we-not-provide-support-for","title":"What do we not provide support for","text":"

    Do not contact the EESSI support team to get help with using software that is included in EESSI, unless you think the problems you are seeing are related to how the software was built and installed.

    Please consult the documentation of the software you are using, or contact the developers of the software directly, if you have questions regarding using the software, or if you think you have found a bug.

    Funded by the European Union. This work has received funding from the European High Performance Computing Joint Undertaking (JU) and countries participating in the project under grant agreement No 101093169.

    "},{"location":"talks/","title":"Talks related to EESSI","text":""},{"location":"talks/#2023","title":"2023","text":"
    • Streaming Optimised Scientific Software: an Introduction to EESSI (online tutorial, 5 Dec 2023)
    • Best Practices for CernVM-FS in HPC (online tutorial, 4 Dec 2023)
    • Streaming optimized scientific software installations on any Linux distro with EESSI (PackagingCon 2023, 27 Oct 2023)
    • Making scientific software EESSI - and fast (8-min AWS HPC Tech Short, 15 June 2023)
    "},{"location":"adding_software/building_software/","title":"Building software","text":"

    (for maintainers)

    "},{"location":"adding_software/building_software/#bot_build","title":"Instructing the bot to build","text":"

    Once the pull request is open, you can instruct the bot to build the software by posting a comment.

    For more information, see the building section in the bot documentation.
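
    As an illustration, such a comment could look like the following (a hypothetical example; the exact repository and architecture filter values to use for your pull request are listed in the bot documentation and in the configuration overview comment the bot posts on your PR):

    bot: build repo:eessi.io-2023.06-software arch:x86_64/generic\n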

    Warning

    Permission to trigger building of software must be granted to your GitHub account first!

    See bot permissions for more information.

    "},{"location":"adding_software/building_software/#guidelines","title":"Guidelines","text":"
    • It may be wise to let the bot perform a test build first, rather than letting it build for a wide range of CPU targets.

    • If one of the builds failed, you can let the bot retry that specific build.

    • Make sure that the software has been built correctly for all CPU targets before you deploy!

    "},{"location":"adding_software/building_software/#checking-the-builds","title":"Checking the builds","text":"

    If all goes well, you should see SUCCESS for each build, along with a button to get more information about the checks that were performed, and metadata on the resulting artefact.

    Note

    Make sure the result is what you expect it to be for all builds before you deploy!

    "},{"location":"adding_software/building_software/#failing-builds","title":"Failing builds","text":"

    Warning

    The bot will currently not give you any information on how or why a build is failing.

    Ask for help in the #software-layer channel of the EESSI Slack if needed!

    "},{"location":"adding_software/building_software/#instructing-the-bot-to-deploy","title":"Instructing the bot to deploy","text":"

    To make the bot deploy the successfully built software, you should issue the corresponding instruction to the bot.

    For more information, see the deploying section in the bot documentation.

    Warning

    Permission to trigger deployment of software installations must be granted to your GitHub account first!

    See bot permissions for more information.

    "},{"location":"adding_software/building_software/#merging-the-pull-request","title":"Merging the pull request","text":"

    You should be able to verify in the pull request that the ingestion has been done, since the CI should fail initially to indicate that some software installations listed in your modified easystack are missing.

    Once the ingestion has been done, simply re-triggering the CI workflow should be sufficient to make it pass, and then the pull request can be merged.

    Note

    This assumes that the easystack file being modified is considered by the CI workflow file (.github/workflows/test_eessi.yml) that checks for missing installations, in the correct branch (for example 2023.06) of the software-layer.

    If that's not the case yet, update this workflow in your pull request as well to add the missing easystack file!

    Warning

    You need permissions to re-trigger CI workflows and merge pull requests in the software-layer repository.

    Ask for help in the #software-layer channel of the EESSI Slack if needed!

    "},{"location":"adding_software/building_software/#getting-help","title":"Getting help","text":"

    If you have any questions, or if you need help with something, don't hesitate to contact us via the #software-layer channel of the EESSI Slack.

    "},{"location":"adding_software/contribution_policy/","title":"Contribution policy","text":"

    (version v0.1.0 - updated 9 Nov 2023)

    Note

    This policy is subject to change, please check back regularly.

    "},{"location":"adding_software/contribution_policy/#purpose","title":"Purpose","text":"

    The purpose of this contribution policy is to provide guidelines for adding software to EESSI.

    It informs about what requirements must be met in order for software to be eligible for inclusion in the EESSI software layer.

    "},{"location":"adding_software/contribution_policy/#requirements","title":"Requirements","text":"

    The following requirements must be taken into account when adding software to EESSI.

    Note that additional restrictions may apply in specific cases that are currently not covered explicitly by this policy.

    "},{"location":"adding_software/contribution_policy/#freely_redistributable_software","title":"i) Freely redistributable software","text":"

    Only freely redistributable software can be added to the EESSI repository, and we strongly prefer including only open source software in EESSI.

    Make sure that you are aware of the relevant software licenses, and that redistribution of the software you want to add to EESSI is allowed.

    For more information about a specific software license, see the SPDX license list.

    Note

    We intend to automatically verify that this requirement is met, by requiring that the SPDX license identifier is provided for all software included in EESSI.

    "},{"location":"adding_software/contribution_policy/#built_by_bot","title":"ii) Built by the bot","text":"

    All software included in the EESSI repository must be built autonomously by our bot.

    For more information, see our semi-automatic software installation procedure.

    "},{"location":"adding_software/contribution_policy/#easybuild","title":"iii) Built and installed with EasyBuild","text":"

    We currently require that all software installations in EESSI are built and installed using EasyBuild.

    We strongly prefer that the latest release of EasyBuild that is available at the time is used to add software to EESSI.

    The use of --from-pr and --include-easyblocks-from-pr to pull in changes to EasyBuild that are required to make the installation work correctly in EESSI is allowed, but only if that is strictly required (that is, if those changes are not included yet in the latest EasyBuild release).

    "},{"location":"adding_software/contribution_policy/#supported_toolchain","title":"iv) Supported compiler toolchain","text":"

    A compiler toolchain that is still supported by the latest EasyBuild release must be used for building the software.

    For more information on supported toolchains, see the EasyBuild toolchain support policy.

    "},{"location":"adding_software/contribution_policy/#recent_toolchains","title":"v) Recent toolchain versions","text":"

    We strongly prefer adding software to EESSI that was built with a recent compiler toolchain.

    When adding software to a particular version of EESSI, you should use a toolchain version that is already installed.

    If you would like to see an additional toolchain version being added to a particular version of EESSI, please open a support request for this, and motivate your request.

    "},{"location":"adding_software/contribution_policy/#recent_software_versions","title":"vi) Recent software versions","text":"

    We strongly prefer adding sufficiently recent software versions to EESSI.

    If you would like to add older software versions, please clearly motivate the need for this in your contribution.

    "},{"location":"adding_software/contribution_policy/#cpu_targets","title":"vii) CPU targets","text":"

    Software that is added to EESSI should work on all supported CPU targets.

    Exceptions to this requirement are allowed if technical problems that can not be resolved with reasonable effort prevent the installation of the software for specific CPU targets.

    "},{"location":"adding_software/contribution_policy/#testing","title":"viii) Testing","text":"

    We should be able to test the software installations via the EESSI test suite, in particular for software applications and user-facing tools.

    Ideally one or more tests are available that verify that the software is functionally correct, and that it (still) performs well.

    Tests that are run during the software installation procedure as performed by EasyBuild must pass. Exceptions can be made if only a small subset of tests fail for specific CPU targets, as long as these exceptions are tracked and an effort is made to assess the impact of those failing tests.

    It should be possible to run a minimal smoke test for the software included in EESSI, for example using EasyBuild's --sanity-check-only feature.
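
    For example, a smoke test for an already installed easyconfig could be run along these lines (a sketch; example-1.2.3-GCC-12.3.0.eb is a hypothetical easyconfig name, and the exact EasyBuild options you need may differ):

    # only run the sanity check step for this software installation\neb example-1.2.3-GCC-12.3.0.eb --sanity-check-only\n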

    Note

    The EESSI test suite is still in active development, and currently only has a minimal set of tests available.

    When the test suite is more mature, this requirement will be enforced more strictly.

    "},{"location":"adding_software/contribution_policy/#changelog","title":"Changelog","text":""},{"location":"adding_software/contribution_policy/#v010-9-nov-2023","title":"v0.1.0 (9 Nov 2023)","text":"
    • initial contribution policy
    "},{"location":"adding_software/debugging_failed_builds/","title":"Debugging failed builds","text":"

    (for contributors + maintainers)

    Unfortunately, software does not always build successfully. Since EESSI targets novel CPU architectures as well, build failures on such platforms are quite common, as the software and/or the software build systems have not always been adjusted to support these architectures yet.

    In EESSI, all software packages are built by a bot. This is great for builds that complete successfully, as we can build many software packages for a wide range of hardware with little human intervention. However, it does mean that you, as a contributor, cannot easily access the build directory and build logs to figure out build issues.

    This page describes how you can interactively reproduce failed builds, so that you can more easily debug the issue.

    Throughout this page, we will use this PR as an example. It intends to add LAMMPS to EESSI. Among other issues, it failed when building Plumed.

    "},{"location":"adding_software/debugging_failed_builds/#prerequisites","title":"Prerequisites","text":"

    You will need to have:

    • Access to a machine with the hardware for which the build that you want to debug failed.
    • On that machine, meet the requirements for running the EESSI container, as described on this page.
    "},{"location":"adding_software/debugging_failed_builds/#preparing-the-environment","title":"Preparing the environment","text":"

    A number of steps are needed to create the same environment in which the bot builds.

    • Fetching the feature branch from which you want to replicate a build.
    • Starting a shell in the EESSI container.
    • Start the Gentoo Prefix environment.
    • Start the EESSI software environment.
    • Configure EasyBuild.
    "},{"location":"adding_software/debugging_failed_builds/#fetching-the-feature-branch","title":"Fetching the feature branch","text":"

    Looking at the example PR, we see the PR is created from this fork. First, we clone the fork, then checkout the feature branch (LAMMPS_23Jun2022)

    git clone https://github.com/laraPPr/software-layer/\ncd software-layer\ngit checkout LAMMPS_23Jun2022\n
    Alternatively, if you already have a clone of the software-layer you can add it as a new remote
    cd software-layer\ngit remote add laraPPr https://github.com/laraPPr/software-layer/\ngit fetch laraPPr\ngit checkout LAMMPS_23Jun2022\n

    "},{"location":"adding_software/debugging_failed_builds/#starting-a-shell-in-the-eessi-container","title":"Starting a shell in the EESSI container","text":"

    Simply run the EESSI container (eessi_container.sh), which should be in the root of the software-layer repository

    ./eessi_container.sh --access rw\n

    If you want to install NVIDIA GPU software, make sure to also add the --nvidia all argument, to ensure that your GPU drivers get mounted inside the container:

    ./eessi_container.sh --access rw --nvidia all\n

    Note

    You may have to press enter to clearly see the prompt, as some messages beginning with CernVM-FS: may have been printed after the first Apptainer> prompt was shown.

    "},{"location":"adding_software/debugging_failed_builds/#more-efficient-approach-for-multiplecontinued-debugging-sessions","title":"More efficient approach for multiple/continued debugging sessions","text":"

    While the above works perfectly well, you might not be able to complete your debugging session in one go. With the above approach, several steps will just be repeated every time you start a debugging session:

    • Downloading the container
    • Installing CUDA in your host injections directory (only if you use the EESSI-install-software.sh script, see below)
    • Installing all dependencies (before you get to the package that actually fails to build)

    To avoid this, we create two directories. One holds the container & host_injections, which are (typically) common between multiple PRs, so you don't have to redownload the container or reinstall the host_injections when you start working on another PR. The other will hold the PR-specific data: a tarball storing the software you'll build in your interactive debugging session. The paths we pick here are just examples; you can pick any persistent, writeable location for this:

    eessi_common_dir=${HOME}/eessi-manual-builds\neessi_pr_dir=${HOME}/pr360\n

    Now, we start the container

    SINGULARITY_CACHEDIR=${eessi_common_dir}/container_cache ./eessi_container.sh --access rw --nvidia all --host-injections ${eessi_common_dir}/host_injections --save ${eessi_pr_dir}\n

    Here, the SINGULARITY_CACHEDIR makes sure that if the container was already downloaded, and is present in the cache, it is not redownloaded. The host injections will just be picked up from ${eessi_common_dir}/host_injections (if those were already installed before). And finally, the --save makes sure that everything that you build in the container gets stored in a tarball as soon as you exit the container.

    Note that the first exit command will first make you exit the Gentoo prefix environment. Only the second will take you out of the container, and print where the tarball will be stored:

    [EESSI 2023.06] $ exit\nlogout\nLeaving Gentoo Prefix with exit status 1\nApptainer> exit\nexit\nSaved contents of tmp directory '/tmp/eessi-debug.VgLf1v9gf0' to tarball '${HOME}/pr360/EESSI-1698056784.tgz' (to resume session add '--resume ${HOME}/pr360/EESSI-1698056784.tgz')\n

    Note that the tarballs can be quite sizeable, so make sure to pick a filesystem where you have a large enough quota.

    Next time you want to continue investigating this issue, you can start the container with --resume DIR/TGZ and continue where you left off, having all dependencies already built and available.

    SINGULARITY_CACHEDIR=${eessi_common_dir}/container_cache ./eessi_container.sh --access rw --nvidia all --host-injections ${eessi_common_dir}/host_injections --save ${eessi_pr_dir} --resume ${eessi_pr_dir}/EESSI-1698056784.tgz\n

    For a detailed description on using the script eessi_container.sh, see here.

    Note

    Reusing a previously downloaded container, or an existing CUDA installation from a host_injections directory, is not a good approach if those could be the cause of your issues. If you are unsure whether this is the case, simply follow the regular approach to starting the EESSI container.

    Note

    It is recommended to clean the container cache and host_injections directories every now and again, to make sure you pick up the latest changes for those two components.

    "},{"location":"adding_software/debugging_failed_builds/#start-the-gentoo-prefix-environment","title":"Start the Gentoo Prefix environment","text":"

    The next step is to start the Gentoo Prefix environment.

    Before we start, check the current values of ${EESSI_CVMFS_REPO} and ${EESSI_VERSION} so that you can reset them later:

    echo ${EESSI_CVMFS_REPO}\necho ${EESSI_VERSION}\n

    Then, we set EESSI_OS_TYPE and EESSI_CPU_FAMILY and run the startprefix command to start the Gentoo Prefix environment:

    export EESSI_OS_TYPE=linux  # We only support Linux for now\nexport EESSI_CPU_FAMILY=$(uname -m)\n${EESSI_CVMFS_REPO}/versions/${EESSI_VERSION}/compat/${EESSI_OS_TYPE}/${EESSI_CPU_FAMILY}/startprefix\n

    Now, reset the ${EESSI_CVMFS_REPO} and ${EESSI_VERSION} in your prefix environment with the initial values (printed in the echo statements above)

    export EESSI_CVMFS_REPO=...\nexport EESSI_VERSION=...\n

    Note

    By activating the Gentoo Prefix environment, the system tools (e.g. ls) you would normally use are now provided by Gentoo Prefix, instead of the container OS. E.g. running which ls after starting the prefix environment as above will return /cvmfs/software.eessi.io/versions/2023.06/compat/linux/x86_64/bin/ls. This makes the builds completely independent from the container OS.

    "},{"location":"adding_software/debugging_failed_builds/#building-for-the-generic-optimization-target","title":"Building for the generic optimization target","text":"

    If you want to replicate a build with generic optimization (i.e. in $EESSI_CVMFS_REPO/versions/${EESSI_VERSION}/software/${EESSI_OS_TYPE}/${EESSI_CPU_FAMILY}/generic) you will need to set the following environment variable:

    export EESSI_CPU_FAMILY=$(uname -m) && export EESSI_SOFTWARE_SUBDIR_OVERRIDE=${EESSI_CPU_FAMILY}/generic\n

    "},{"location":"adding_software/debugging_failed_builds/#building-software-with-the-eessi-install-softwaresh-script","title":"Building software with the EESSI-install-software.sh script","text":"

    The Automatic build and deploy bot installs software by executing the EESSI-install-software.sh script. The advantage is that running this script is the closest you can get to replicating the bot's behaviour - and thus the failure. The downside is that if a PR adds a lot of software, it may take quite a long time to run - even if you might already know what the problematic software package is. In that case, you might be better off following the steps under [Building software from an easystack file](#building-software-from-an-easystack-file) or [Building an individual package](#building-an-individual-package).

    Note that you could also combine approaches: first build everything using the EESSI-install-software.sh script, until you reproduce the failure. Then, start making modifications (e.g. changes to the EasyConfig, patches, etc) and trying to rebuild that package individually to test your changes.

    To build software using the EESSI-install-software.sh script, you'll first need to get the diff file for the PR. This is used by the EESSI-install-software.sh script to see what has changed in this PR - and thus what needs to be built for this PR. To download the diff for PR 360, we would e.g. do

    wget https://github.com/EESSI/software-layer/pull/360.diff\n

    Now, we run the EESSI-install-software.sh script:

    ./EESSI-install-software.sh\n
    "},{"location":"adding_software/debugging_failed_builds/#building-software-from-an-easystack-file","title":"Building software from an easystack file","text":""},{"location":"adding_software/debugging_failed_builds/#starting-the-eessi-software-environment","title":"Starting the EESSI software environment","text":"

    To activate the software environment, run

    source ${EESSI_CVMFS_REPO}/versions/${EESSI_VERSION}/init/bash\n

    Note

    If you get an error bash: /versions//init/bash: No such file or directory, you forgot to reset the ${EESSI_CVMFS_REPO} and ${EESSI_VERSION} environment variables at the end of the previous step.

    Note

    If you want to build with generic optimization, you should run export EESSI_CPU_FAMILY=$(uname -m) && export EESSI_SOFTWARE_SUBDIR_OVERRIDE=${EESSI_CPU_FAMILY}/generic before sourcing.

    For more info on starting the EESSI software environment, see here

    "},{"location":"adding_software/debugging_failed_builds/#configure-easybuild","title":"Configure EasyBuild","text":"

    It is important that we configure EasyBuild in the same way as the bot uses it, with one small exception: our working directory will be different. Typically, that doesn't matter, but it's good to be aware of this one difference, in case you fail to replicate the build failure.

    In this example, we create a unique temporary directory inside /tmp to serve as our workdir. We then source the configure_easybuild script, which will configure EasyBuild by setting environment variables.

    export WORKDIR=$(mktemp --directory --tmpdir=/tmp  -t eessi-debug.XXXXXXXXXX)\nsource configure_easybuild\n
    Among other things, the configure_easybuild script sets the install path for EasyBuild to point to the correct installation directory (${EESSI_CVMFS_REPO}/versions/${EESSI_VERSION}/software/${EESSI_OS_TYPE}/${EESSI_SOFTWARE_SUBDIR}). This is the exact same path the bot uses to build; writing to a path in /cvmfs (which is normally read-only) works because the container provides a writeable overlay filesystem.

    Note

    If you started the container using --resume, you may want WORKDIR to point to the workdir you created previously (instead of creating a new, temporary directory with mktemp).

    Note

    If you want to replicate a build with generic optimization (i.e. in $EESSI_CVMFS_REPO/versions/${EESSI_VERSION}/software/${EESSI_OS_TYPE}/${EESSI_CPU_FAMILY}/generic) you will need to set export EASYBUILD_OPTARCH=GENERIC after sourcing configure_easybuild.

    Next, we need to determine the correct version of EasyBuild to load. Since the example PR changes the file eessi-2023.06-eb-4.8.1-2021b.yml, this tells us the bot was using version 4.8.1 of EasyBuild to build this. Thus, we load that version of the EasyBuild module and check if everything was configured correctly:

    module load EasyBuild/4.8.1\neb --show-config\n
    You should get something similar to

    #\n# Current EasyBuild configuration\n# (C: command line argument, D: default value, E: environment variable, F: configuration file)\n#\nbuildpath            (E) = /tmp/easybuild/easybuild/build\ncontainerpath        (E) = /tmp/easybuild/easybuild/containers\ndebug                (E) = True\nexperimental         (E) = True\nfilter-deps          (E) = Autoconf, Automake, Autotools, binutils, bzip2, DBus, flex, gettext, gperf, help2man, intltool, libreadline, libtool, Lua, M4, makeinfo, ncurses, util-linux, XZ, zlib, Yasm\nfilter-env-vars      (E) = LD_LIBRARY_PATH\nhooks                (E) = ${HOME}/software-layer/eb_hooks.py\nignore-osdeps        (E) = True\ninstallpath          (E) = /tmp/easybuild/software/linux/aarch64/neoverse_n1\nmodule-extensions    (E) = True\npackagepath          (E) = /tmp/easybuild/easybuild/packages\nprefix               (E) = /tmp/easybuild/easybuild\nread-only-installdir (E) = True\nrepositorypath       (E) = /tmp/easybuild/easybuild/ebfiles_repo\nrobot-paths          (D) = /cvmfs/software.eessi.io/versions/2023.06/software/linux/aarch64/neoverse_n1/software/EasyBuild/4.8.1/easybuild/easyconfigs\nrpath                (E) = True\nsourcepath           (E) = /tmp/easybuild/easybuild/sources:\nsysroot              (E) = /cvmfs/software.eessi.io/versions/2023.06/compat/linux/aarch64\ntrace                (E) = True\nzip-logs             (E) = bzip2\n
    "},{"location":"adding_software/debugging_failed_builds/#building-everything-in-the-easystack-file","title":"Building everything in the easystack file","text":"

    In our example PR, the easystack file that was changed was eessi-2023.06-eb-4.8.1-2021b.yml. To build this, we run (in the directory that contains the checkout of this feature branch):

    eb --easystack eessi-2023.06-eb-4.8.1-2021b.yml --robot\n
    After some time, this build fails while trying to build Plumed, and we can access the build log to look for clues on why it failed.

    "},{"location":"adding_software/debugging_failed_builds/#building-an-individual-package","title":"Building an individual package","text":"

    First, prepare the environment by following the [Starting the EESSI software environment](#starting-the-eessi-software-environment) and Configure EasyBuild steps above.

    In our example PR, the individual package that was added to eessi-2023.06-eb-4.8.1-2021b.yml was LAMMPS-23Jun2022-foss-2021b-kokkos.eb. To mimic the build behaviour, we'll also have to (re)use any options that are listed in the easystack file for LAMMPS-23Jun2022-foss-2021b-kokkos.eb, in this case the option --from-pr 19000. Thus, to build, we run:

    eb LAMMPS-23Jun2022-foss-2021b-kokkos.eb --robot --from-pr 19000\n
    After some time, this build fails while trying to build Plumed, and we can access the build log to look for clues on why it failed.

    Note

    While this might be faster than the easystack-based approach, this is not how the bot builds. So while it may reproduce the failure the bot encounters, it may not reproduce the bug at all (no failure), or it may run into different bugs. If you want to be sure, use the easystack-based approach.

    "},{"location":"adding_software/debugging_failed_builds/#known-causes-of-issues-in-eessi","title":"Known causes of issues in EESSI","text":""},{"location":"adding_software/debugging_failed_builds/#the-custom-system-prefix-of-the-compatibility-layer","title":"The custom system prefix of the compatibility layer","text":"

    Some installations might expect the system root (sysroot, for short) to be in /. However, in case of EESSI, we are building against the OS in the compatibility layer. Thus, our sysroot is something like ${EESSI_CVMFS_REPO}/versions/${EESSI_VERSION}/compat/${EESSI_OS_TYPE}/${EESSI_CPU_FAMILY}. This can cause issues if installation procedures assume the sysroot is in /.

    One example of a sysroot issue was in installing wget. The EasyConfig for wget defined

    # make sure pkg-config picks up system packages (OpenSSL & co)\npreconfigopts = \"export PKG_CONFIG_PATH=/usr/lib64/pkgconfig:/usr/lib/pkgconfig:/usr/lib/x86_64-linux-gnu/pkgconfig && \"\nconfigopts = '--with-ssl=openssl '\n
    This will not work in EESSI, since OpenSSL should be picked up from the compatibility layer. This was fixed by changing the EasyConfig to read
    preconfigopts = \"export PKG_CONFIG_PATH=%(sysroot)s/usr/lib64/pkgconfig:%(sysroot)s/usr/lib/pkgconfig:%(sysroot)s/usr/lib/x86_64-linux-gnu/pkgconfig && \"\nconfigopts = '--with-ssl=openssl '\n
    The %(sysroot)s is a template value which EasyBuild will resolve to the value that has been configured in EasyBuild for sysroot (it is one of the fields printed by eb --show-config if a non-standard sysroot is configured).

    If you encounter issues where the installation can not find something that is normally provided by the OS (i.e. not one of the dependencies in your module environment), you may need to resort to a similar approach.

    "},{"location":"adding_software/debugging_failed_builds/#the-writeable-overlay","title":"The writeable overlay","text":"

    The writeable overlay in the container is known to be a bit slow sometimes. Thus, we have seen tests failing because they exceed some timeout (e.g. this issue).

    To investigate if the writeable overlay is somehow the issue, you can make sure the installation gets done somewhere else, e.g. in the temporary directory in /tmp that you created as workdir. To do this, set

    export EASYBUILD_INSTALLPATH=${WORKDIR}\n

    after the step in which you have sourced the configure_easybuild script. Note that in order to find (with module av) any modules that get installed here, you will need to add this path to the MODULEPATH:

    module use ${EASYBUILD_INSTALLPATH}/modules/all\n

    Then, retry building the software (as described above). If the build now succeeds, you know that indeed the writeable overlay caused the issue. We have to build in this writeable overlay when we do real deployments. Thus, if you hit such a timeout, try to see if you can (temporarily) modify the timeout value in the test so that it passes.

    "},{"location":"adding_software/deploying_software/","title":"Deploying software","text":"

    (for maintainers)

    "},{"location":"adding_software/deploying_software/#instructing-the-bot-to-deploy","title":"Instructing the bot to deploy","text":"

    To make the bot deploy the successfully built software, you should issue the corresponding instruction to the bot.

    For more information, see the deploying section in the bot documentation.
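
    As an illustration, the instruction is again given by posting a comment on the pull request (a hypothetical example; check the bot documentation for the exact syntax and any filters you may need):

    bot: deploy\n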

    Warning

    Permission to trigger deployment of software installations must be granted to your GitHub account first!

    See bot permissions for more information.

    "},{"location":"adding_software/deploying_software/#merging-the-pull-request","title":"Merging the pull request","text":"

    You should be able to verify in the pull request that the ingestion has been done, since the CI should fail initially to indicate that some software installations listed in your modified easystack are missing.

    Once the ingestion has been done, simply re-triggering the CI workflow should be sufficient to make it pass, and then the pull request can be merged.

    Note

    This assumes that the easystack file being modified is considered by the CI workflow file (.github/workflows/test_eessi.yml) that checks for missing installations, in the correct branch (for example 2023.06) of the software-layer.

    If that's not the case yet, update this workflow in your pull request as well to add the missing easystack file!

    Warning

    You need permissions to re-trigger CI workflows and merge pull requests in the software-layer repository.

    Ask for help in the #software-layer channel of the EESSI Slack if needed!

    "},{"location":"adding_software/deploying_software/#getting-help","title":"Getting help","text":"

    If you have any questions, or if you need help with something, don't hesitate to contact us via the #software-layer channel of the EESSI Slack.

    "},{"location":"adding_software/opening_pr/","title":"Opening a pull request","text":"

    (for contributors)

    To add software to EESSI, you should go through the semi-automatic software installation procedure by:

    • 1) Making a pull request to the software-layer repository to (add or) update an easystack file that is used by EasyBuild to install software;
    • 2) Instructing the bot to build the software on all supported CPU microarchitectures;
    • 3) Instructing the bot to deploy the built software for ingestion into the EESSI repository;
    • 4) Merging the pull request once CI indicates that the software has been ingested.

    Warning

    Make sure you are also aware of our contribution policy when adding software to EESSI.

    "},{"location":"adding_software/opening_pr/#preparation","title":"Preparation","text":"

    Before you can make a pull request to the software-layer, you should fork the repository in your GitHub account.

    For the remainder of these instructions, we assume that your GitHub account is @koala.

    Note

    Don't forget to replace koala with the name of your GitHub account in the commands below!

    1) Clone the EESSI/software-layer repository:

    mkdir EESSI\ncd EESSI\ngit clone https://github.com/EESSI/software-layer\ncd software-layer\n

    2) Add your fork as a remote

    git remote add koala git@github.com:koala/software-layer.git\n

    3) Check out the branch that corresponds to the version of EESSI repository you want to add software to, for example 2023.06-software.eessi.io:

    git checkout 2023.06-software.eessi.io\n

    Note

    The commands above only need to be run once, to prepare your setup for making pull requests.

    "},{"location":"adding_software/opening_pr/#software_layer_pull_request","title":"Creating a pull request","text":"

    1) Make sure that your 2023.06-software.eessi.io branch in the checkout of the EESSI/software-layer repository is up-to-date

    cd EESSI/software-layer\ngit checkout 2023.06-software.eessi.io \ngit pull origin 2023.06-software.eessi.io \n

    2) Create a new branch (use a sensible name, not example_branch as below), and check it out

    git checkout -b example_branch\n

    3) Determine the correct easystack file to change, and add one or more lines to it that specify which easyconfigs should be installed

    echo '  - example-1.2.3-GCC-12.3.0.eb' >> easystacks/software.eessi.io/2023.06/eessi-2023.06-eb-4.8.2-2023a.yml\n

    4) Stage and commit the changes into your branch with a sensible message

    git add easystacks/software.eessi.io/2023.06/eessi-2023.06-eb-4.8.2-2023a.yml\ngit commit -m \"{2023.06}[GCC/12.3.0] example 1.2.3\"\n

    5) Push your branch to your fork of the software-layer repository

    git push koala example_branch\n

    6) Go to the GitHub web interface to open your pull request, or use the helpful link that should show up in the output of the git push command.

    Make sure you target the correct branch: the one that corresponds to the version of EESSI you want to add software to (like 2023.06-software.eessi.io).

    If all goes well, one or more bots should almost instantly create a comment in your pull request with an overview of how it is configured - you will need this information when providing build instructions.

    "},{"location":"adding_software/overview/","title":"Overview of adding software to EESSI","text":"

    We welcome contributions to the EESSI software stack. This page shows the procedure and provides links to the contribution policy and the technical details of making a contribution.

    "},{"location":"adding_software/overview/#contribute-a-software-to-the-eessi-software-stack","title":"Contribute a software to the EESSI software stack","text":"
    \n%%{init: { 'theme':'forest', 'sequence': {'useMaxWidth':false} } }%%\nflowchart TB\n    I(contributor)  \n    K(reviewer)\n    A(Is there an EasyConfig for software) -->|No|B(Create an EasyConfig and contribute it to EasyBuild)\n    A --> |Yes|D(Create a PR to software-layer)\n    B --> C(Evaluate and merge pull request)\n    C --> D\n    D --> E(Review PR & trigger builds)\n    E --> F(Debug build issue if needed)\n    F --> G(Deploy tarballs to S3 bucket)\n    G --> H(Ingest tarballs in EESSI by merging staging PRs)\n     classDef blue fill:#9abcff,stroke:#333,stroke-width:2px;\n     class A,B,D,F,I blue\n     click B \"https://easybuild.io/\"\n     click D \"../opening_pr/\"\n     click F \"../debugging_failed_builds/\"\n
    "},{"location":"adding_software/overview/#contributing-a-reframe-test-to-the-eessi-test-suite","title":"Contributing a ReFrame test to the EESSI test suite","text":"

    Ideally, a contributor prepares a ReFrame test for the software to be added to the EESSI software stack.

    \n%%{init: { 'theme':'forest', 'sequence': {'useMaxWidth':false} } }%%\nflowchart TB\n\n    Z(Create ReFrame test & PR to tests-suite) --> Y(Review PR & run new test)\n    Y --> W(Debug issue if needed) \n    W --> V(Review PR if needed)\n    V --> U(Merge PR)\n     classDef blue fill:#9abcff,stroke:#333,stroke-width:2px;\n     class Z,W blue\n
    "},{"location":"adding_software/overview/#more-about-adding-software-to-eessi","title":"More about adding software to EESSI","text":"
    • Contribution policy
    • Opening a pull request (for contributors)
    • Building software (for maintainers)
    • Debugging failed builds (for contributors + maintainers)
    • Deploying software (for maintainers)

    If you need help with adding software to EESSI, please open a support request.

    "},{"location":"blog/","title":"Blog","text":""},{"location":"blog/2024/05/17/isc24/","title":"EESSI promo tour @ ISC'24 (May 2024, Hamburg)","text":"

    This week, we had the privilege of attending the ISC'24 conference in the beautiful city of Hamburg, Germany. This was an excellent opportunity for us to showcase EESSI, and gain valuable insights and feedback from the HPC community.

    "},{"location":"blog/2024/05/17/isc24/#bof-session-on-eessi","title":"BoF session on EESSI","text":"

    The EESSI Birds-of-a-Feather (BoF) session on Tuesday morning, part of the official ISC'24 program, was the highlight of our activities in Hamburg.

    It was well attended, with well over 100 people joining us at 9am.

    During this session, we introduced the EESSI project with a short presentation, followed by a well-received live hands-on demo of installing and using EESSI by spinning up an \"empty\" Linux virtual machine instance in Amazon EC2 and getting optimized installations of popular scientific applications like GROMACS and TensorFlow running in a matter of minutes.

    During the second part of the BoF session, we engaged with the audience through an interactive poll and by letting attendees ask questions.

    The presentation slides, including the results of the interactive poll and questions that were raised by attendees, are available here.

    "},{"location":"blog/2024/05/17/isc24/#workshops","title":"Workshops","text":"

    During the last day of ISC'24, EESSI was present in no less than three different workshops.

    "},{"location":"blog/2024/05/17/isc24/#risc-v-workshop","title":"RISC-V workshop","text":"

    At the Fourth International workshop on RISC-V for HPC, Juli\u00e1n Morillo (BSC) presented our paper \"Preparing to Hit the Ground Running: Adding RISC-V support to EESSI\" (slides available here).

    Juli\u00e1n covered the initial work that was done in the scope of the MultiXscale EuroHPC Centre-of-Excellence to add support for RISC-V to EESSI, outlined the challenges we encountered, and shared the lessons we have learned along the way.

    "},{"location":"blog/2024/05/17/isc24/#ahug-workshop","title":"AHUG workshop","text":"

    During the Arm HPC User Group (AHUG) workshop, Kenneth Hoste (HPC-UGent) gave a talk entitled \"Extending Arm\u2019s Reach by Going EESSI\" (slides available here).

    Next to a high-level introduction to EESSI, we briefly covered some of the challenges we encountered when testing the optimized software installations that we had built for the Arm Neoverse V1 microarchitecture, including bugs in OpenMPI and GROMACS.

    Kenneth gave a live demonstration of how to get access to EESSI and start running the optimized software installations we provide through our CernVM-FS repository on a fresh AWS Graviton 3 instance in a matter of minutes.

    "},{"location":"blog/2024/05/17/isc24/#pop-workshop","title":"POP workshop","text":"

    In the afternoon on Thursday, Lara Peeters (HPC-UGent) presented MultiXscale during the Readiness of HPC Extreme-scale Applications workshop, which was organised by the POP EuroHPC Centre-of-Excellence (slides available here).

    Lara outlined the pilot use cases on which MultiXscale focuses, and explained how EESSI helps to achieve the goals of MultiXscale in terms of Productivity, Performance, and Portability.

    At the end of the workshop, a group picture was taken with both organisers and speakers, which was a great way to wrap up a busy week in Hamburg!

    "},{"location":"blog/2024/05/17/isc24/#talks-and-demos-on-eessi-at-exhibit","title":"Talks and demos on EESSI at exhibit","text":"

    Not only was EESSI part of the official ISC'24 program via a dedicated BoF session and various workshops: we were also prominently present on the exhibit floor.

    "},{"location":"blog/2024/05/17/isc24/#microsoft-azure-booth","title":"Microsoft Azure booth","text":"

    Microsoft Azure invited us to give a 1-hour introductory presentation on EESSI on both Monday and Wednesday at their booth during the ISC'24 exhibit, as well as to provide live demonstrations at the demo corner of their booth on Tuesday afternoon on how to get access to EESSI and the user experience it provides.

    Exhibit attendees were welcome to pass by and ask questions, and did so throughout the full 4 hours we were present there.

    Both Microsoft Azure and AWS have been graciously providing resources in their cloud infrastructure free-of-cost for developing, testing, and demonstrating EESSI for several years now.

    "},{"location":"blog/2024/05/17/isc24/#eurohpc-booth","title":"EuroHPC booth","text":"

    The MultiXscale EuroHPC Centre-of-Excellence we are actively involved in, and through which the development of EESSI is being co-funded since Jan'23, was invited by the EuroHPC JU to present the goals and preliminary achievements at their booth.

    Elisabeth Ortega (HPCNow!) did the honours to give the last talk at the EuroHPC JU booth of the ISC'24 exhibit.

    "},{"location":"blog/2024/05/17/isc24/#stickers","title":"Stickers!","text":"

    Last but not least: we handed out a boatload of free stickers with the logos of both MultiXscale and EESSI itself, as well as stickers of various open source software projects we leverage, including EasyBuild, Lmod, and CernVM-FS.

    We have mostly exhausted our sticker collection during ISC'24, but don't worry: we will make sure we have more available at upcoming events...

    "},{"location":"filesystem_layer/stratum1/","title":"Setting up a Stratum 1","text":"

    Setting up a Stratum 1 involves the following steps:

    • set up the Stratum 1, preferably by running the Ansible playbook that we provide;
    • request a Stratum 0 firewall exception for your Stratum 1 server;
    • request a <your site>.stratum1.cvmfs.eessi-infra.org DNS entry;
    • open a pull request to include the URL to your Stratum 1 in the EESSI configuration.

    The last two steps can be skipped if you want to host a \"private\" Stratum 1 for your site.

    "},{"location":"filesystem_layer/stratum1/#requirements-for-a-stratum-1","title":"Requirements for a Stratum 1","text":"

    The main requirements for a Stratum 1 server are a good network connection to the clients it is going to serve, and sufficient disk space. For the EESSI repository, a few hundred gigabytes should suffice, but for production environments at least 1 TB would be recommended.

    In terms of cores and memory, a machine with just a few (~4) cores and 4-8 GB of memory should suffice.

    Various Linux distributions are supported, but we recommend one based on RHEL 7 or 8.

    Finally, make sure that ports 80 (for the Apache web server) and 8000 are open.
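
    For example, on a RHEL-based system that uses firewalld, these ports could be opened as follows (a sketch; adapt this to whatever firewall tooling your site uses):

    sudo firewall-cmd --permanent --add-port=80/tcp\nsudo firewall-cmd --permanent --add-port=8000/tcp\nsudo firewall-cmd --reload\n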

    "},{"location":"filesystem_layer/stratum1/#step-1-set-up-the-stratum-1","title":"Step 1: set up the Stratum 1","text":"

    The recommended way for setting up an EESSI Stratum 1 is by running the Ansible playbook stratum1.yml from the filesystem-layer repository on GitHub.

    Installing a Stratum 1 requires a GEO API license key, which will be used to find the (geographically) closest Stratum 1 server for your client and proxies. More information on how to (freely) obtain this key is available in the CVMFS documentation: https://cvmfs.readthedocs.io/en/stable/cpt-replica.html#geo-api-setup.

    You can put your license key in the local configuration file inventory/local_site_specific_vars.yml.

    Furthermore, the Stratum 1 runs a Squid server. The template configuration file can be found at templates/eessi_stratum1_squid.conf.j2. If you want to customize it, for instance for limiting the access to the Stratum 1, you can make your own version of this template file and point to it by setting local_stratum1_cvmfs_squid_conf_src in inventory/local_site_specific_vars.yml. See the comments in the example file for more details.
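
    For example, pointing the playbook to your own template could look like this in inventory/local_site_specific_vars.yml (a sketch; the path to the template file is just a placeholder):

    local_stratum1_cvmfs_squid_conf_src: templates/my_site_stratum1_squid.conf.j2\n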

    Start by installing Ansible:

    sudo yum install -y ansible\n

    Then install Ansible roles for EESSI:

    ansible-galaxy role install -r requirements.yml -p ./roles --force\n

    Make sure you have enough space in /srv (on the Stratum 1) since the snapshot of the Stratum 0 will end up there by default. To alter the directory where the snapshot gets copied to you can add this variable in inventory/host_vars/<url-or-ip-to-your-stratum1>:

    cvmfs_srv_mount: /srv\n

    Make sure that you have added the hostname or IP address of your server to the inventory/hosts file. Finally, install the Stratum 1 using one of the two following options.

    Option 1:

    # -b to run as root, optionally use -K if a sudo password is required\nansible-playbook -b [-K] -e @inventory/local_site_specific_vars.yml stratum1.yml\n

    Option 2:

    Create an SSH key pair and make sure the ansible-host-keys.pub is in the $HOME/.ssh/authorized_keys file on your Stratum 1 server.

    ssh-keygen -b 2048 -t rsa -f ~/.ssh/ansible-host-keys -q -N \"\"\n

    Then run the playbook:

    ansible-playbook -b --private-key ~/.ssh/ansible-host-keys -e @inventory/local_site_specific_vars.yml stratum1.yml\n

    Running the playbook will automatically make replicas of all the repositories defined in group_vars/all.yml.

    "},{"location":"filesystem_layer/stratum1/#step-2-request-a-firewall-exception","title":"Step 2: request a firewall exception","text":"

    (This step is not implemented yet and can be skipped)

    You can request a firewall exception rule to be added for your Stratum 1 server by opening an issue on the GitHub page of the filesystem layer repository.

    Make sure to include the IP address of your server.

    "},{"location":"filesystem_layer/stratum1/#step-3-verification-of-the-stratum-1","title":"Step 3: Verification of the Stratum 1","text":"

    When the playbook has finished, your Stratum 1 should be ready. In order to test your Stratum 1, even without a client installed, you can use curl.

    curl --head http://<url-or-ip-to-your-stratum1>/cvmfs/software.eessi.io/.cvmfspublished\n
    This should return:

    HTTP/1.1 200 OK\n...\nX-Cache: MISS from <url-or-ip-to-your-stratum1>\n

    The second time you run it, you should get a cache hit:

    X-Cache: HIT from <url-or-ip-to-your-stratum1>\n

    Example with the Norwegian Stratum 1:

    curl --head http://bgo-no.stratum1.cvmfs.eessi-infra.org/cvmfs/software.eessi.io/.cvmfspublished\n

    You can also test access to your Stratum 1 from a client, for which you will have to install the CVMFS client.

    Then run the following command to add your newly created Stratum 1 to the existing list of EESSI Stratum 1 servers by creating a local CVMFS configuration file:

    echo 'CVMFS_SERVER_URL=\"http://<url-or-ip-to-your-stratum1>/cvmfs/@fqrn@;$CVMFS_SERVER_URL\"' | sudo tee -a /etc/cvmfs/domain.d/eessi-hpc.org.local\n

    If this is the first time you set up the client you now run:

    sudo cvmfs_config setup\n

    If you already had configured the client before, you can simply reload the config:

    sudo cvmfs_config reload -c software.eessi.io\n

    Finally, verify that the client connects to your new Stratum 1 by running:

    cvmfs_config stat -v software.eessi.io\n

    Assuming that your new Stratum 1 is the geographically closest one to your client, this should return:

    Connection: http://<url-or-ip-to-your-stratum1>/cvmfs/software.eessi.io through proxy DIRECT (online)\n
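
    If a different server is still being used, for instance because the client had already ranked the available servers before you added yours, you could ask the client to re-probe and list the configured servers. This is an optional check, not a required step:

    sudo cvmfs_talk -i software.eessi.io host probe\nsudo cvmfs_talk -i software.eessi.io host info\n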
    "},{"location":"filesystem_layer/stratum1/#step-4-request-an-eessi-dns-name","title":"Step 4: request an EESSI DNS name","text":"

    In order to keep the configuration clean and easy, all the EESSI Stratum 1 servers have a DNS name <your site>.stratum1.cvmfs.eessi-infra.org, where <your site> is often a short name or abbreviation followed by the country code (e.g. rug-nl or bgo-no). You can request this for your Stratum 1 by mentioning this in the issue that you created in Step 2, or by opening another issue.

    "},{"location":"filesystem_layer/stratum1/#step-5-include-your-stratum-1-in-the-eessi-configuration","title":"Step 5: include your Stratum 1 in the EESSI configuration","text":"

    If you want to include your Stratum 1 in the EESSI configuration, i.e. allow any (nearby) client to be able to use it, you can open a pull request with updated configuration files. You will only have to add the URL to your Stratum 1 to the urls list of the eessi_cvmfs_server_urls variable in the all.yml file.

    "},{"location":"getting_access/eessi_container/","title":"EESSI container script","text":"

    The eessi_container.sh script provides a very easy yet versatile means to access EESSI. It is the preferred method to start an EESSI container as it has support for many different scenarios via various options.

    This page guides you through several example scenarios illustrating the use of the script.

    "},{"location":"getting_access/eessi_container/#prerequisites","title":"Prerequisites","text":"
    • Apptainer 1.0.0 (or newer), or Singularity 3.7.x
      • Check with apptainer --version or singularity --version
      • Support for the --fusemount option in the shell and run subcommands is required (a quick way to check this is sketched right after this list)
    • Git
      • Check with git --version
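
    To quickly check the --fusemount support mentioned above, you could grep the help output of the shell subcommand; this is a heuristic rather than an official test:

    apptainer shell --help | grep fusemount    # or: singularity shell --help | grep fusemount\n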
    "},{"location":"getting_access/eessi_container/#preparation","title":"Preparation","text":"

    Clone the EESSI/software-layer repository and change into the software-layer directory by running these commands:

    git clone https://github.com/EESSI/software-layer.git\ncd software-layer\n
    "},{"location":"getting_access/eessi_container/#quickstart","title":"Quickstart","text":"

    Run the eessi_container script (from the software-layer directory) to start a shell session in the EESSI container:

    ./eessi_container.sh\n

    Note

    Startup will take a bit longer the first time you run this because the container image is downloaded and converted.

    You should see output like

    Using /tmp/eessi.abc123defg as tmp storage (add '--resume /tmp/eessi.abc123defg' to resume where this session ended).\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell  --fusemount container:cvmfs2 cvmfs-config.cern.ch /cvmfs/cvmfs-config.cern.ch --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nApptainer> CernVM-FS: loading Fuse module... done\nCernVM-FS: loading Fuse module... done\n\nApptainer>\n

    Note

    You may have to press Enter to see the prompt clearly, since some messages beginning with CernVM-FS: may have been printed after the first Apptainer> prompt was shown.

    To start using EESSI, see Using EESSI/Setting up your environment.
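
    For example, once you are at the Apptainer> prompt, you could initialize the EESSI environment by sourcing the init script of the production repository (here assuming version 2023.06):

    source /cvmfs/software.eessi.io/versions/2023.06/init/bash\n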

    "},{"location":"getting_access/eessi_container/#help-for-eessi_containersh","title":"Help for eessi_container.sh","text":"

    The example in the Quickstart section facilitates an interactive session with read access to the EESSI software stack. It does not require any command line options, because the script eessi_container.sh uses some carefully chosen defaults. To view all options of the script and its default values, run the command

    ./eessi_container.sh --help\n
    You should see the following output
    usage: ./eessi_container.sh [OPTIONS] [[--] SCRIPT or COMMAND]\n OPTIONS:\n  -a | --access {ro,rw}  - ro (read-only), rw (read & write) [default: ro]\n  -c | --container IMG   - image file or URL defining the container to use\n                           [default: docker://ghcr.io/eessi/build-node:debian11]\n  -g | --storage DIR     - directory space on host machine (used for\n                           temporary data) [default: 1. TMPDIR, 2. /tmp]\n  -h | --help            - display this usage information [default: false]\n  -i | --host-injections - directory to link to for host_injections \n                           [default: /..storage../opt-eessi]\n  -l | --list-repos      - list available repository identifiers [default: false]\n  -m | --mode MODE       - with MODE==shell (launch interactive shell) or\n                           MODE==run (run a script or command) [default: shell]\n  -n | --nvidia MODE     - configure the container to work with NVIDIA GPUs,\n                           MODE==install for a CUDA installation, MODE==run to\n                           attach a GPU, MODE==all for both [default: false]\n  -r | --repository CFG  - configuration file or identifier defining the\n                           repository to use [default: EESSI via\n                           container configuration]\n  -u | --resume DIR/TGZ  - resume a previous run from a directory or tarball,\n                           where DIR points to a previously used tmp directory\n                           (check for output 'Using DIR as tmp ...' of a previous\n                           run) and TGZ is the path to a tarball which is\n                           unpacked the tmp dir stored on the local storage space\n                           (see option --storage above) [default: not set]\n  -s | --save DIR/TGZ    - save contents of tmp directory to a tarball in\n                           directory DIR or provided with the fixed full path TGZ\n                           when a directory is provided, the format of the\n                           tarball's name will be {REPO_ID}-{TIMESTAMP}.tgz\n                           [default: not set]\n  -v | --verbose         - display more information [default: false]\n  -x | --http-proxy URL  - provides URL for the env variable http_proxy\n                           [default: not set]; uses env var $http_proxy if set\n  -y | --https-proxy URL - provides URL for the env variable https_proxy\n                           [default: not set]; uses env var $https_proxy if set\n\n If value for --mode is 'run', the SCRIPT/COMMAND provided is executed. If\n arguments to the script/command start with '-' or '--', use the flag terminator\n '--' to let eessi_container.sh stop parsing arguments.\n

    So, the defaults are equal to running the command

    ./eessi_container.sh --access ro --container docker://ghcr.io/eessi/build-node:debian11 --mode shell --repository EESSI\n
    and it would either create a temporary directory under ${TMPDIR} (if defined), or /tmp (if ${TMPDIR} is not defined).
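
    If /tmp is small on your system, you could point the script to another location for its temporary data via the --storage option; a minimal sketch, where the directory name is an arbitrary choice:

    mkdir -p $HOME/eessi_tmp\n./eessi_container.sh --storage $HOME/eessi_tmp\n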

    The remainder of this page will demonstrate different scenarios using some of the command line options used for read-only access.

    Other options supported by the script will be discussed in a yet-to-be-written section covering how to build software to be added to the EESSI stack.

    "},{"location":"getting_access/eessi_container/#resuming-a-previous-session","title":"Resuming a previous session","text":"

    You may have noted the following line in the output of eessi_container.sh

    Using /tmp/eessi.abc123defg as tmp storage (add '--resume /tmp/eessi.abc123defg' to resume where this session ended).\n

    Note

    The parameter after --resume (/tmp/eessi.abc123defg) will be different when you run eessi_container.sh.

    Scroll back in your terminal and copy it so you can pass it to --resume.

    Try the following command to \"resume\" from the last session.

    ./eessi_container.sh --resume /tmp/eessi.abc123defg\n
    This should run much faster because the container image has been cached in the temporary directory (/tmp/eessi.abc123defg). You should get to the prompt (Apptainer> or Singularity>) and can use EESSI with the state where you left the previous session.

    Note

    The state refers to what was stored on disk, not what was changed in memory. Particularly, any environment (variable) settings are not restored automatically.

    Because the /tmp/eessi.abc123defg directory contains a home directory which includes the saved history of your last session, you can easily restore the environment (variable) settings. Type history to see which commands you ran. You should be able to access the history as you would do in a normal terminal session.

    "},{"location":"getting_access/eessi_container/#running-a-simple-command","title":"Running a simple command","text":"

    Let's \"ls /cvmfs/software.eessi.io\" through the eessi_container.sh script to check if the CernVM-FS EESSI repository is accessible:

    ./eessi_container.sh --mode run ls /cvmfs/software.eessi.io\n

    You should see an output such as

    Using /tmp/eessi.abc123defg as tmp storage (add '--resume /tmp/eessi.abc123defg' to resume where this session ended).\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell  --fusemount container:cvmfs2 cvmfs-config.cern.ch /cvmfs/cvmfs-config.cern.ch --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nhost_injections  latest  versions\n

    Note that this time no interactive shell session is started in the container: only the provided command is run in the container, and when that finishes you are back in the shell session where you ran the eessi_container.sh script.

    This is because we used the --mode run command line option.

    Note

    The last line in the output is the output of the ls command, which shows the contents of the /cvmfs/software.eessi.io directory.

    Also, note that there is no shell prompt (Apptainer> or Singularity>), since no interactive shell session is started in the container.

    As an alternative to specifying the command as we did above, you can also do the following.

    CMD=\"ls -l /cvmfs/software.eessi.io\"\n./eessi_container.sh --mode shell <<< ${CMD}\n

    Note

    We changed the mode from run to shell because we use a different method to let the script run our command, by feeding it in via the stdin input channel using <<<.

    Because shell is the default value for --mode we can also omit this and simply run

    CMD=\"ls -l /cvmfs/software.eessi.io\"\n./eessi_container.sh <<< ${CMD}\n

    "},{"location":"getting_access/eessi_container/#running-a-script","title":"Running a script","text":"

    While running a simple command can be sufficient in some cases, you often want to run scripts containing multiple commands.

    Let's run the script shown below.

    First, copy-paste the contents of the script shown below into a file named eessi_architectures.sh in your current directory. Then make the script executable by running:

    chmod +x eessi_architectures.sh\n

    Here are the contents for the eessi_architectures.sh script:

    #!/usr/bin/env bash\n#\n# This script determines which architectures are included in the\n# latest EESSI version. It makes use of the specific directory\n# structure in the EESSI repository.\n#\n\n# determine list of available OS types\nBASE=${EESSI_CVMFS_REPO:-/cvmfs/software.eessi.io}/latest/software\ncd ${BASE}\nfor os_type in $(ls -d *)\ndo\n    # determine architecture families\n    OS_BASE=${BASE}/${os_type}\n    cd ${OS_BASE}\n    for arch_family in $(ls -d *)\n    do\n        # determine CPU microarchitectures\n        OS_ARCH_BASE=${BASE}/${os_type}/${arch_family}\n        cd ${OS_ARCH_BASE}\n        for microarch in $(ls -d *)\n        do\n            case ${microarch} in\n                amd | intel )\n                    for sub in $(ls ${microarch})\n                    do\n                        echo \"${os_type}/${arch_family}/${microarch}/${sub}\"\n                    done\n                    ;;\n                * )\n                    echo \"${os_type}/${arch_family}/${microarch}\"\n                    ;;\n            esac\n        done\n    done\ndone\n
    Run the script as follows
    ./eessi_container.sh --mode shell < eessi_architectures.sh\n
    The output should be similar to
    Using /tmp/eessi.abc123defg as tmp storage (add '--resume /tmp/eessi.abc123defg' to resume where this session ended).\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nlinux/aarch64/generic\nlinux/aarch64/graviton2\nlinux/aarch64/graviton3\nlinux/ppc64le/generic\nlinux/ppc64le/power9le\nlinux/x86_64/amd/zen2\nlinux/x86_64/amd/zen3\nlinux/x86_64/generic\nlinux/x86_64/intel/haswell\nlinux/x86_64/intel/skylake_avx512\n
    The last ten lines show the output of the script eessi_architectures.sh.

    If you want to use the run mode (--mode run), you have to make the script's location available inside the container.

    This can be done by mapping the current directory (${PWD}), which contains eessi_architectures.sh, to a not-yet-existing directory inside the container using the $SINGULARITY_BIND or $APPTAINER_BIND environment variable.

    For example:

    SINGULARITY_BIND=${PWD}:/scripts ./eessi_container.sh --mode run /scripts/eessi_architectures.sh\n
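
    When Apptainer is used, the equivalent with the $APPTAINER_BIND environment variable should work as well (sketch):

    APPTAINER_BIND=${PWD}:/scripts ./eessi_container.sh --mode run /scripts/eessi_architectures.sh\n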

    "},{"location":"getting_access/eessi_container/#running-scripts-or-commands-with-parameters-starting-with-or-","title":"Running scripts or commands with parameters starting with - or --","text":"

    Let's assume we would like to get more information about the entries of /cvmfs/software.eessi.io. If we would just run

    ./eessi_container.sh --mode run ls -lH /cvmfs/software.eessi.io\n
    we would get an error message such as
    ERROR: Unknown option: -lH\n
    We can resolve this in two ways:

    1. Using the stdin channel as described above, for example, by simply running
      CMD=\"ls -lH /cvmfs/software.eessi.io\"\n./eessi_container.sh <<< ${CMD}\n
      which should result in the output similar to
      Using /tmp/eessi.abc123defg as tmp directory (to resume session add '--resume /tmp/eessi.abc123defg').\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nfuse: failed to clone device fd: Inappropriate ioctl for device\nfuse: trying to continue without -o clone_fd.\ntotal 10\nlrwxrwxrwx 1 user user   10 Jun 30  2021 host_injections -> /opt/eessi\nlrwxrwxrwx 1 user user   16 May  4  2022 latest -> versions/2021.12\ndrwxr-xr-x 3 user user 4096 Dec 10  2021 versions\n
    2. Using the flag terminator -- which tells eessi_container.sh to stop parsing command line arguments. For example,
      ./eessi_container.sh --mode run -- ls -lH /cvmfs/software.eessi.io\n
      which should result in the output similar to
      Using /tmp/eessi.abc123defg as tmp directory (to resume session add '--resume /tmp/eessi.abc123defg').\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q run --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif ls -lH /cvmfs/software.eessi.io\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nfuse: failed to clone device fd: Inappropriate ioctl for device\nfuse: trying to continue without -o clone_fd.\ntotal 10\nlrwxrwxrwx 1 user user   10 Jun 30  2021 host_injections -> /opt/eessi\nlrwxrwxrwx 1 user user   16 May  4  2022 latest -> versions/2021.12\ndrwxr-xr-x 3 user user 4096 Dec 10  2021 versions\n
    "},{"location":"getting_access/eessi_container/#running-eessi-demos","title":"Running EESSI demos","text":"

    For examples of scripts that use the software provided by EESSI, see Running EESSI demos.

    "},{"location":"getting_access/eessi_container/#launching-containers-more-quickly","title":"Launching containers more quickly","text":"

    Subsequent runs of eessi_container.sh can reuse temporary data of a previous session, including the pulled container image, via the --resume option. However, we do not always want to resume a previous session just to launch the container more quickly.

    The eessi_container.sh script may (re)-use a cache directory provided via $SINGULARITY_CACHEDIR (or $APPTAINER_CACHEDIR when using Apptainer). Hence, the container image does not have to be downloaded again even when starting a new session. The example below illustrates this.

    export SINGULARITY_CACHEDIR=${PWD}/container_cache_dir\ntime ./eessi_container.sh <<< \"ls /cvmfs/software.eessi.io\"\n
    which should produce output similar to
    Using /tmp/eessi.abc123defg as tmp directory (to resume session add '--resume /tmp/eessi.abc123defg').\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nfuse: failed to clone device fd: Inappropriate ioctl for device\nfuse: trying to continue without -o clone_fd.\nhost_injections  latest  versions\n\nreal    m40.445s\nuser    3m2.621s\nsys     0m7.402s\n
    The next run using the same cache directory, e.g., by simply executing
    time ./eessi_container.sh <<< \"ls /cvmfs/software.eessi.io\"\n
    is much faster
    Using /tmp/eessi.abc123defg as tmp directory (to resume session add '--resume /tmp/eessi.abc123defg').\nPulling container image from docker://ghcr.io/eessi/build-node:debian11 to /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nLaunching container with command (next line):\nsingularity -q shell --fusemount container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io /tmp/eessi.abc123defg/ghcr.io_eessi_build_node_debian11.sif\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nfuse: failed to clone device fd: Inappropriate ioctl for device\nfuse: trying to continue without -o clone_fd.\nhost_injections  latest  versions\n\nreal    0m2.781s\nuser    0m0.172s\nsys     0m0.436s\n

    Note

    Each run of eessi_container.sh (without specifying --resume) creates a new temporary directory. The temporary directory stores, among other data, the image file of the container, which ensures that the container is available locally for a subsequent run that resumes this session.

    However, this may quickly consume scarce resources, for example, a small partition where /tmp is located (default for temporary storage, see --help for specifying a different location).

    See next section for making sure to clean up no longer needed temporary data.

    "},{"location":"getting_access/eessi_container/#reducing-disk-usage","title":"Reducing disk usage","text":"

    By default eessi_container.sh creates a temporary directory under /tmp. The directories are named eessi.RANDOM where RANDOM is a 10-character string. The script does not automatically remove these directories. To determine their total disk usage, simply run

    du -sch /tmp/eessi.*\n
    which could result in output similar to
    333M    /tmp/eessi.session123\n333M    /tmp/eessi.session456\n333M    /tmp/eessi.session789\n997M    total\n
    Clean up disk usage by simply removing directories you do not need any longer.
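
    For example, to remove all of them at once, assuming you no longer need any of these sessions (this also discards anything saved in them):

    rm -rf /tmp/eessi.*\n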

    "},{"location":"getting_access/eessi_container/#eessi-container-image","title":"EESSI container image","text":"

    If you would like to use an EESSI container image directly, you can do so by configuring Singularity (or Apptainer) to correctly mount the CVMFS repository:

    # honor $TMPDIR if it is already defined, use /tmp otherwise\nif [ -z $TMPDIR ]; then\n    export WORKDIR=/tmp/$USER\nelse\n    export WORKDIR=$TMPDIR/$USER\nfi\n\nmkdir -p ${WORKDIR}/{var-lib-cvmfs,var-run-cvmfs,home}\nexport SINGULARITY_BIND=\"${WORKDIR}/var-run-cvmfs:/var/run/cvmfs,${WORKDIR}/var-lib-cvmfs:/var/lib/cvmfs\"\nexport SINGULARITY_HOME=\"${WORKDIR}/home:/home/$USER\"\nexport EESSI_REPO=\"container:cvmfs2 software.eessi.io /cvmfs/software.eessi.io\"\nexport EESSI_CONTAINER=\"docker://ghcr.io/eessi/client:centos7\"\nsingularity shell --fusemount \"$EESSI_REPO\" \"$EESSI_CONTAINER\"\n
    "},{"location":"getting_access/is_eessi_accessible/","title":"Is EESSI accessible?","text":"

    EESSI can be accessed via a native (CernVM-FS) installation, or via a container that includes CernVM-FS.

    Before you look into these options, check if EESSI is already accessible on your system.

    Run the following command:

    ls /cvmfs/software.eessi.io\n

    Note

    This ls command may take a couple of seconds to finish, since CernVM-FS may need to download or update the metadata for that directory.

    If you see output like shown below, you already have access to EESSI on your system.

    host_injections  latest  versions\n

    For starting to use EESSI, continue reading about Setting up environment.

    If you see an error message as shown below, EESSI is not yet accessible on your system.

    ls: /cvmfs/software.eessi.io: No such file or directory\n
    No worries, you can still easily get access to EESSI.

    Continue reading about the Native installation of EESSI, or access via the EESSI container.

    "},{"location":"getting_access/native_installation/","title":"Native installation","text":"

    Setting up native access to EESSI, that is, a system-wide deployment that does not require workarounds like using a container, requires installing and configuring CernVM-FS.

    This requires admin privileges, since you need to install CernVM-FS as an OS package.

    The following actions must be taken for a (basic) native installation of EESSI:

    • Installing CernVM-FS itself, ideally using the OS packages provided by the CernVM-FS project (although installing from source is also possible);
    • Installing the EESSI configuration for CernVM-FS, which can be done by installing the cvmfs-config-eessi package that we provide for the most popular Linux distributions (more information available here);
    • Creating a small client configuration file for CernVM-FS (/etc/cvmfs/default.local); see also the CernVM-FS documentation.

    The good news is that all of this only requires a handful of commands:

    Run the commands below for your Linux distribution family; the first block is for RHEL-based Linux distributions, the second for Debian-based Linux distributions:
    # Installation commands for RHEL-based distros like CentOS, Rocky Linux, Almalinux, Fedora, ...\n\n# install CernVM-FS\nsudo yum install -y https://ecsft.cern.ch/dist/cvmfs/cvmfs-release/cvmfs-release-latest.noarch.rpm\nsudo yum install -y cvmfs\n\n# install EESSI configuration for CernVM-FS\nsudo yum install -y https://github.com/EESSI/filesystem-layer/releases/download/latest/cvmfs-config-eessi-latest.noarch.rpm\n\n# create client configuration file for CernVM-FS (no squid proxy, 10GB local CernVM-FS client cache)\nsudo bash -c \"echo 'CVMFS_CLIENT_PROFILE=\"single\"' > /etc/cvmfs/default.local\"\nsudo bash -c \"echo 'CVMFS_QUOTA_LIMIT=10000' >> /etc/cvmfs/default.local\"\n\n# make sure that EESSI CernVM-FS repository is accessible\nsudo cvmfs_config setup\n
    # Installation commands for Debian-based distros like Ubuntu, ...\n\n# install CernVM-FS\nsudo apt-get install lsb-release\nwget https://ecsft.cern.ch/dist/cvmfs/cvmfs-release/cvmfs-release-latest_all.deb\nsudo dpkg -i cvmfs-release-latest_all.deb\nrm -f cvmfs-release-latest_all.deb\nsudo apt-get update\nsudo apt-get install -y cvmfs\n\n# install EESSI configuration for CernVM-FS\nwget https://github.com/EESSI/filesystem-layer/releases/download/latest/cvmfs-config-eessi_latest_all.deb\nsudo dpkg -i cvmfs-config-eessi_latest_all.deb\n\n# create client configuration file for CernVM-FS (no squid proxy, 10GB local CernVM-FS client cache)\nsudo bash -c \"echo 'CVMFS_CLIENT_PROFILE=\"single\"' > /etc/cvmfs/default.local\"\nsudo bash -c \"echo 'CVMFS_QUOTA_LIMIT=10000' >> /etc/cvmfs/default.local\"\n\n# make sure that EESSI CernVM-FS repository is accessible\nsudo cvmfs_config setup\n

    Note

    The commands above only cover the basic installation of EESSI.

    This is good enough for an individual client, or for testing purposes, but for a production-quality setup you should also set up a Squid proxy cache.
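
    As a rough illustration (the proxy hostname and port are placeholders, and the exact setup is site-specific), pointing clients at such a proxy typically comes down to adding a CVMFS_HTTP_PROXY line to the client configuration and reloading:

    sudo bash -c \"echo 'CVMFS_HTTP_PROXY=http://your-proxy.example.org:3128' >> /etc/cvmfs/default.local\"\nsudo cvmfs_config reload\n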

    For large-scale systems, like an HPC cluster, you should also consider setting up your own CernVM-FS Stratum-1 mirror server.

    For more details on this, please refer to the Stratum 1 and proxies section of the CernVM-FS tutorial.

    "},{"location":"known_issues/eessi-2023.06/","title":"Known issues","text":""},{"location":"known_issues/eessi-2023.06/#eessi-production-repository-v202306","title":"EESSI Production Repository (v2023.06)","text":""},{"location":"known_issues/eessi-2023.06/#failed-to-modify-ud-qp-to-init-on-mlx5_0-operation-not-permitted","title":"Failed to modify UD QP to INIT on mlx5_0: Operation not permitted","text":"

    This is an error that occurs with OpenMPI after updating to OFED 23.10.

    There is an upstream issue about this problem opened with EasyBuild; see https://github.com/easybuilders/easybuild-easyconfigs/issues/20233

    Workarounds

    You can instruct OpenMPI not to use libfabric and to turn off `uct` (see https://openucx.readthedocs.io/en/master/running.html#running-mpi) by passing the following options to `mpirun`:

    mpirun -mca pml ucx -mca btl '^uct,ofi' -mca mtl '^ofi'\n
    Or equivalently, you can set the following environment variables:
    export OMPI_MCA_btl='^uct,ofi'\nexport OMPI_MCA_pml='ucx'\nexport OMPI_MCA_mtl='^ofi'\n
    "},{"location":"meetings/2022-09-amsterdam/","title":"EESSI Community Meeting (Sept'22, Amsterdam)","text":""},{"location":"meetings/2022-09-amsterdam/#practical-info","title":"Practical info","text":"
    • dates: Wed-Fri 14-16 Sept'22
    • in conjunction with CernVM workshop @ Nikhef (Mon-Tue 12-13 Sept'22)
    • venue: \"Polderzaal\" at Cafe-Restaurant Polder (Google Maps), sponsored by SURF
    • registration (closed since Fri 9 Sept'22)
    • Slack channel: community-meeting-2022 in EESSI Slack
    • YouTube playlist with recorded talks
    "},{"location":"meetings/2022-09-amsterdam/#agenda","title":"Agenda","text":"

    (subject to changes)

    We envision a mix of presentations, experience reports, demos, and hands-on sessions and/or hackathons related to the EESSI project.

    If you would like to give a talk or host a session, please let us know via the EESSI Slack!

    "},{"location":"meetings/2022-09-amsterdam/#wed-14-sept-2022","title":"Wed 14 Sept 2022","text":"
    • [10:00-13:00] Welcome session
      • [10:00-10:30] Walk-in, coffee
      • [10:30-12:00] Round table discussion (not live-streamed!)
    • [12:00-13:00] Lunch
    • [13:00-15:00] Presentations on EESSI
      • [13:00-13:30] Introduction to EESSI (Caspar) [slides - recording]
      • [13:30-14:00] Hands-on: how to use EESSI (Kenneth) [slides - recording]
      • [14:00-14:30] EESSI use cases (Kenneth) [slides - recording]
      • [14:30-15:00] EESSI for sysadmins (Thomas) [slides - recording]
    • [15:00-15:30] Coffee break
    • [15:30-17:00] Presentations on EESSI (continued)
      • [15:30-16:00] Hands-on: installing EESSI (Thomas/Kenneth)
      • [16:00-16:45] ComputeCanada site talk (Bart Oldeman, remote) [slides - recording]
      • [16:45-17:15] Magic Castle (Felix-Antoine Fortin, remote) [slides - recording]
    • [19:00-...] Group dinner @ Saravanaa Bhavan (sponsored by Dell Technologies)
      • address: Stadhouderskade 123-124, Amsterdam
    "},{"location":"meetings/2022-09-amsterdam/#thu-15-sept-2022","title":"Thu 15 Sept 2022","text":"
    • [09:30-12:00] More focused presentations on aspects of EESSI
      • [09:30-10:00] EESSI behind the scenes: compat layer (Bob) [slides - recording]
      • [10:00-10:30] EESSI behind the scenes: software layer (Kenneth) [slides - recording]
      • [10:30-11:00] Coffee break
      • [11:00-11:30] EESSI behind the scenes: infrastructure (Terje) [slides - recording]
      • [11:30-12:00] Status on RISC-V support (Kenneth) [slides - recording]
    • [12:00-13:00] Lunch
    • [13:00-14:00] Discussions/hands-on sessions/hackathon
    • [14:00-14:30] Status on GPU support (Alan) [slides - recording]
    • [14:30-15:00] Status on build-and-deploy bot (Thomas) [slides - recording]
    • [15:00-15:30] Coffee break
    • [15:30-17:00] Discussions/hands-on sessions/hackathon (continued)
      • Hands-on with GPUs (Alan)
      • Hands-on with bot (Thomas/Kenneth)
    • [19:00-...] Group dinner @ Italia Oggi (sponsored by HPC-UGent)
      • address: Binnen Bantammerstraat 11, Amsterdam
    "},{"location":"meetings/2022-09-amsterdam/#fri-16-sept-2022","title":"Fri 16 Sept 2022","text":"
    • [09:30-12:00] Presentations on future work
      • [09:30-10:00] Testing in software layer (Caspar) [slides - recording]
      • [10:00-10:30] MultiXscale project (Alan) [slides - recording]
      • [10:30-11:00] Coffee break
      • [11:00-11:30] Short-term future work (Kenneth) [slides - recording]
    • [11:30-12:00] Discussion: future management structure of EESSI (Alan) [slides - recording]
    • [12:00-13:00] Lunch
    • [13:00-14:00] Site reports [recording]
      • NESSI (Thomas) [slides]
      • NLPL (Stephan) [slides]
      • HPCNow! (Danilo) [slides]
      • Azure (Hugo) [slides]
    • [14:00-14:30] Discussion: what would make or break EESSI for your site? (notes - recording)
    • [14:30-15:45] Discussions/hands-on sessions/hackathon
      • Hands-on with GPU support (Alan)
      • Hands-on with bot (Thomas/Kenneth)
      • Hands-on with software testing (Caspar)
    • We need to leave the room by 16:00!
    "},{"location":"repositories/pilot/","title":"Pilot","text":""},{"location":"repositories/pilot/#pilot-software-stack-202112","title":"Pilot software stack (2021.12)","text":""},{"location":"repositories/pilot/#caveats","title":"Caveats","text":"

    Danger

    The EESSI pilot repository is no longer actively maintained, and should not be used for production work.

    Please use the software.eessi.io repository instead.

    The current EESSI pilot software stack (version 2021.12) is the 7th iteration, and there are some known issues and limitations; please take these into account:

    • First of all: the EESSI pilot software stack is NOT READY FOR PRODUCTION!

    Do not use it for production work, and be careful when testing it on production systems!

    "},{"location":"repositories/pilot/#reporting-problems","title":"Reporting problems","text":"

    If you notice any problems, please report them via https://github.com/EESSI/software-layer/issues.

    "},{"location":"repositories/pilot/#accessing-the-eessi-pilot-repository-through-singularity","title":"Accessing the EESSI pilot repository through Singularity","text":"

    The easiest way to access the EESSI pilot repository is by using Singularity. If Singularity is installed already, no admin privileges are required. No other software is needed either on the host.

    A container image is available in the GitHub Container Registry (see https://github.com/EESSI/filesystem-layer/pkgs/container/client-pilot). It only contains a minimal operating system + the necessary packages to access the EESSI pilot repository through CernVM-FS, and it is suitable for aarch64, ppc64le, and x86_64.

    The container image can be used directly by Singularity (no prior download required), as follows:

    • First, create some local directories in /tmp/$USER which will be bind mounted in the container:

      mkdir -p /tmp/$USER/{var-lib-cvmfs,var-run-cvmfs,home}\n
      These provide space for the CernVM-FS cache, and an empty home directory to use in the container.

    • Set the $SINGULARITY_BIND and $SINGULARITY_HOME environment variables to configure Singularity:

      export SINGULARITY_BIND=\"/tmp/$USER/var-run-cvmfs:/var/run/cvmfs,/tmp/$USER/var-lib-cvmfs:/var/lib/cvmfs\"\nexport SINGULARITY_HOME=\"/tmp/$USER/home:/home/$USER\"\n

    • Start the container using singularity shell, using --fusemount to mount the EESSI pilot repository (using the cvmfs2 command that is included in the container image):

      export EESSI_PILOT=\"container:cvmfs2 pilot.eessi-hpc.org /cvmfs/pilot.eessi-hpc.org\"\nsingularity shell --fusemount \"$EESSI_PILOT\" docker://ghcr.io/eessi/client-pilot:centos7\n

    • This should give you a shell in the container, where the EESSI pilot repository is mounted:

      $ singularity shell --fusemount \"$EESSI_PILOT\" docker://ghcr.io/eessi/client-pilot:centos7\nINFO:    Using cached SIF image\nCernVM-FS: pre-mounted on file descriptor 3\nCernVM-FS: loading Fuse module... done\nSingularity>\n

    • It is possible that you see some scary-looking warnings, but those can be ignored for now.

    To verify that things are working, check the contents of the /cvmfs/pilot.eessi-hpc.org/versions/2021.12 directory:

    Singularity> ls /cvmfs/pilot.eessi-hpc.org/versions/2021.12\ncompat  init  software\n

    "},{"location":"repositories/pilot/#standard-installation","title":"Standard installation","text":"

    For those with privileges on their system, there are a number of example installation scripts for different architectures and operating systems available in the EESSI demo repository.

    Here we prefer the Singularity approach as we can guarantee that the container image is up to date.

    "},{"location":"repositories/pilot/#setting-up-the-eessi-environment","title":"Setting up the EESSI environment","text":"

    Once you have the EESSI pilot repository mounted, you can set up the environment by sourcing the provided init script:

    source /cvmfs/pilot.eessi-hpc.org/versions/2021.12/init/bash\n

    If all goes well, you should see output like this:

    Found EESSI pilot repo @ /cvmfs/pilot.eessi-hpc.org/versions/2021.12!\nUsing x86_64/intel/haswell as software subdirectory.\nUsing /cvmfs/pilot.eessi-hpc.org/versions/2021.12/software/linux/x86_64/intel/haswell/modules/all as the directory to be added to MODULEPATH.\nFound Lmod configuration file at /cvmfs/pilot.eessi-hpc.org/versions/2021.12/software/linux/x86_64/intel/haswell/.lmod/lmodrc.lua\nInitializing Lmod...\nPrepending /cvmfs/pilot.eessi-hpc.org/versions/2021.12/software/linux/x86_64/intel/haswell/modules/all to $MODULEPATH...\nEnvironment set up to use EESSI pilot software stack, have fun!\n[EESSI pilot 2021.12] $ \n

    Now you're all set up! Go ahead and explore the software stack using \"module avail\", and go wild with testing the available software installations!

    "},{"location":"repositories/pilot/#testing-the-eessi-pilot-software-stack","title":"Testing the EESSI pilot software stack","text":"

    Please test the EESSI pilot software stack as you see fit: running simple commands, performing small calculations or running small benchmarks, etc.

    Test scripts that have been verified to work correctly using the pilot software stack are available at https://github.com/EESSI/software-layer/tree/main/tests .
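
    Besides those scripts, a quick manual check could look like the example below; this is a hedged illustration, and module names may differ depending on your CPU architecture:

    module load GROMACS/2020.4-foss-2020a-Python-3.8.2\ngmx --version\n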

    "},{"location":"repositories/pilot/#giving-feedback-or-reporting-problems","title":"Giving feedback or reporting problems","text":"

    Any feedback is welcome, and questions or problems reports are welcome as well, through one of the EESSI communication channels:

    • (preferred!) EESSI software-layer GitHub repository: https://github.com/EESSI/software-layer/issues
    • EESSI mailing list (eessi@list.rug.nl)
    • EESSI Slack: https://eessi-hpc.slack.com (get an invite via https://www.eessi-hpc.org/join)
    • monthly EESSI meetings (first Thursday of the month at 2pm CEST)
    "},{"location":"repositories/pilot/#available-software","title":"Available software","text":"

    (last update: Mar 21st 2022)

    EESSI currently supports the following HPC applications as well as all their dependencies:

    • GROMACS (2020.1 and 2020.4)
    • OpenFOAM (v2006 and 8)
    • R (4.0.0) + R-bundle-Bioconductor (3.11) + RStudio Server (1.3.1093)
    • TensorFlow (2.3.1) and Horovod (0.21.3)
    • OSU-Micro-Benchmarks (5.6.3)
    • ReFrame (3.9.1)
    • Spark (3.1.1)
    • IPython (7.15.0)
    • QuantumESPRESSO (6.6) (currently not available on ppc64le)
    • WRF (3.9.1.1)
    [EESSI pilot 2021.12] $ module --nx avail\n\n--------------------------- /cvmfs/pilot.eessi-hpc.org/versions/2021.12/software/linux/x86_64/intel/haswell/modules/all ----------------------------\n   ant/1.10.8-Java-11                                              LMDB/0.9.24-GCCcore-9.3.0\n   Arrow/0.17.1-foss-2020a-Python-3.8.2                            lz4/1.9.2-GCCcore-9.3.0\n   Bazel/3.6.0-GCCcore-9.3.0                                       Mako/1.1.2-GCCcore-9.3.0\n   Bison/3.5.3-GCCcore-9.3.0                                       MariaDB-connector-c/3.1.7-GCCcore-9.3.0\n   Boost/1.72.0-gompi-2020a                                        matplotlib/3.2.1-foss-2020a-Python-3.8.2\n   cairo/1.16.0-GCCcore-9.3.0                                      Mesa/20.0.2-GCCcore-9.3.0\n   CGAL/4.14.3-gompi-2020a-Python-3.8.2                            Meson/0.55.1-GCCcore-9.3.0-Python-3.8.2\n   CMake/3.16.4-GCCcore-9.3.0                                      METIS/5.1.0-GCCcore-9.3.0\n   CMake/3.20.1-GCCcore-10.3.0                                     MPFR/4.0.2-GCCcore-9.3.0\n   code-server/3.7.3                                               NASM/2.14.02-GCCcore-9.3.0\n   DB/18.1.32-GCCcore-9.3.0                                        ncdf4/1.17-foss-2020a-R-4.0.0\n   DB/18.1.40-GCCcore-10.3.0                                       netCDF-Fortran/4.5.2-gompi-2020a\n   double-conversion/3.1.5-GCCcore-9.3.0                           netCDF/4.7.4-gompi-2020a\n   Doxygen/1.8.17-GCCcore-9.3.0                                    nettle/3.6-GCCcore-9.3.0\n   EasyBuild/4.5.0                                                 networkx/2.4-foss-2020a-Python-3.8.2\n   EasyBuild/4.5.1                                         (D)     Ninja/1.10.0-GCCcore-9.3.0\n   Eigen/3.3.7-GCCcore-9.3.0                                       NLopt/2.6.1-GCCcore-9.3.0\n   Eigen/3.3.9-GCCcore-10.3.0                                      NSPR/4.25-GCCcore-9.3.0\n   ELPA/2019.11.001-foss-2020a                                     NSS/3.51-GCCcore-9.3.0\n   expat/2.2.9-GCCcore-9.3.0                                       nsync/1.24.0-GCCcore-9.3.0\n   expat/2.2.9-GCCcore-10.3.0                                      numactl/2.0.13-GCCcore-9.3.0\n   FFmpeg/4.2.2-GCCcore-9.3.0                                      numactl/2.0.14-GCCcore-10.3.0\n   FFTW/3.3.8-gompi-2020a                                          OpenBLAS/0.3.9-GCC-9.3.0\n   FFTW/3.3.9-gompi-2021a                                          OpenBLAS/0.3.15-GCC-10.3.0\n   flatbuffers/1.12.0-GCCcore-9.3.0                                OpenFOAM/v2006-foss-2020a\n   FlexiBLAS/3.0.4-GCC-10.3.0                                      OpenFOAM/8-foss-2020a                              (D)\n   fontconfig/2.13.92-GCCcore-9.3.0                                OpenMPI/4.0.3-GCC-9.3.0\n   foss/2020a                                                      OpenMPI/4.1.1-GCC-10.3.0\n   foss/2021a                                                      OpenPGM/5.2.122-GCCcore-9.3.0\n   freetype/2.10.1-GCCcore-9.3.0                                   OpenSSL/1.1                                        (D)\n   FriBidi/1.0.9-GCCcore-9.3.0                                     OSU-Micro-Benchmarks/5.6.3-gompi-2020a\n   GCC/9.3.0                                                       Pango/1.44.7-GCCcore-9.3.0\n   GCC/10.3.0                                                      ParaView/5.8.0-foss-2020a-Python-3.8.2-mpi\n   GCCcore/9.3.0                                                   PCRE/8.44-GCCcore-9.3.0\n   
GCCcore/10.3.0                                                  PCRE2/10.34-GCCcore-9.3.0\n   Ghostscript/9.52-GCCcore-9.3.0                                  Perl/5.30.2-GCCcore-9.3.0\n   giflib/5.2.1-GCCcore-9.3.0                                      Perl/5.32.1-GCCcore-10.3.0\n   git/2.23.0-GCCcore-9.3.0-nodocs                                 pixman/0.38.4-GCCcore-9.3.0\n   git/2.32.0-GCCcore-10.3.0-nodocs                        (D)     pkg-config/0.29.2-GCCcore-9.3.0\n   GLib/2.64.1-GCCcore-9.3.0                                       pkg-config/0.29.2-GCCcore-10.3.0\n   GLPK/4.65-GCCcore-9.3.0                                         pkg-config/0.29.2                                  (D)\n   GMP/6.2.0-GCCcore-9.3.0                                         pkgconfig/1.5.1-GCCcore-9.3.0-Python-3.8.2\n   GMP/6.2.1-GCCcore-10.3.0                                        PMIx/3.1.5-GCCcore-9.3.0\n   gnuplot/5.2.8-GCCcore-9.3.0                                     PMIx/3.2.3-GCCcore-10.3.0\n   GObject-Introspection/1.64.0-GCCcore-9.3.0-Python-3.8.2         poetry/1.0.9-GCCcore-9.3.0-Python-3.8.2\n   gompi/2020a                                                     protobuf-python/3.13.0-foss-2020a-Python-3.8.2\n   gompi/2021a                                                     protobuf/3.13.0-GCCcore-9.3.0\n   groff/1.22.4-GCCcore-9.3.0                                      pybind11/2.4.3-GCCcore-9.3.0-Python-3.8.2\n   groff/1.22.4-GCCcore-10.3.0                                     pybind11/2.6.2-GCCcore-10.3.0\n   GROMACS/2020.1-foss-2020a-Python-3.8.2                          Python/2.7.18-GCCcore-9.3.0\n   GROMACS/2020.4-foss-2020a-Python-3.8.2                  (D)     Python/3.8.2-GCCcore-9.3.0\n   GSL/2.6-GCC-9.3.0                                               Python/3.9.5-GCCcore-10.3.0-bare\n   gzip/1.10-GCCcore-9.3.0                                         Python/3.9.5-GCCcore-10.3.0\n   h5py/2.10.0-foss-2020a-Python-3.8.2                             PyYAML/5.3-GCCcore-9.3.0\n   HarfBuzz/2.6.4-GCCcore-9.3.0                                    Qt5/5.14.1-GCCcore-9.3.0\n   HDF5/1.10.6-gompi-2020a                                         QuantumESPRESSO/6.6-foss-2020a\n   Horovod/0.21.3-foss-2020a-TensorFlow-2.3.1-Python-3.8.2         R-bundle-Bioconductor/3.11-foss-2020a-R-4.0.0\n   hwloc/2.2.0-GCCcore-9.3.0                                       R/4.0.0-foss-2020a\n   hwloc/2.4.1-GCCcore-10.3.0                                      re2c/1.3-GCCcore-9.3.0\n   hypothesis/6.13.1-GCCcore-10.3.0                                RStudio-Server/1.3.1093-foss-2020a-Java-11-R-4.0.0\n   ICU/66.1-GCCcore-9.3.0                                          Rust/1.52.1-GCCcore-10.3.0\n   ImageMagick/7.0.10-1-GCCcore-9.3.0                              ScaLAPACK/2.1.0-gompi-2020a\n   IPython/7.15.0-foss-2020a-Python-3.8.2                          ScaLAPACK/2.1.0-gompi-2021a-fb\n   JasPer/2.0.14-GCCcore-9.3.0                                     scikit-build/0.10.0-foss-2020a-Python-3.8.2\n   Java/11.0.2                                             (11)    SciPy-bundle/2020.03-foss-2020a-Python-3.8.2\n   jbigkit/2.1-GCCcore-9.3.0                                       SciPy-bundle/2021.05-foss-2021a\n   JsonCpp/1.9.4-GCCcore-9.3.0                                     SCOTCH/6.0.9-gompi-2020a\n   LAME/3.100-GCCcore-9.3.0                                        snappy/1.1.8-GCCcore-9.3.0\n   libarchive/3.5.1-GCCcore-10.3.0                                 Spark/3.1.1-foss-2020a-Python-3.8.2\n   libcerf/1.13-GCCcore-9.3.0        
                              SQLite/3.31.1-GCCcore-9.3.0\n   libdrm/2.4.100-GCCcore-9.3.0                                    SQLite/3.35.4-GCCcore-10.3.0\n   libevent/2.1.11-GCCcore-9.3.0                                   SWIG/4.0.1-GCCcore-9.3.0\n   libevent/2.1.12-GCCcore-10.3.0                                  Szip/2.1.1-GCCcore-9.3.0\n   libfabric/1.11.0-GCCcore-9.3.0                                  Tcl/8.6.10-GCCcore-9.3.0\n   libfabric/1.12.1-GCCcore-10.3.0                                 Tcl/8.6.11-GCCcore-10.3.0\n   libffi/3.3-GCCcore-9.3.0                                        tcsh/6.22.02-GCCcore-9.3.0\n   libffi/3.3-GCCcore-10.3.0                                       TensorFlow/2.3.1-foss-2020a-Python-3.8.2\n   libgd/2.3.0-GCCcore-9.3.0                                       time/1.9-GCCcore-9.3.0\n   libGLU/9.0.1-GCCcore-9.3.0                                      Tk/8.6.10-GCCcore-9.3.0\n   libglvnd/1.2.0-GCCcore-9.3.0                                    Tkinter/3.8.2-GCCcore-9.3.0\n   libiconv/1.16-GCCcore-9.3.0                                     UCX/1.8.0-GCCcore-9.3.0\n   libjpeg-turbo/2.0.4-GCCcore-9.3.0                               UCX/1.10.0-GCCcore-10.3.0\n   libpciaccess/0.16-GCCcore-9.3.0                                 UDUNITS/2.2.26-foss-2020a\n   libpciaccess/0.16-GCCcore-10.3.0                                UnZip/6.0-GCCcore-9.3.0\n   libpng/1.6.37-GCCcore-9.3.0                                     UnZip/6.0-GCCcore-10.3.0\n   libsndfile/1.0.28-GCCcore-9.3.0                                 WRF/3.9.1.1-foss-2020a-dmpar\n   libsodium/1.0.18-GCCcore-9.3.0                                  X11/20200222-GCCcore-9.3.0\n   LibTIFF/4.1.0-GCCcore-9.3.0                                     x264/20191217-GCCcore-9.3.0\n   libtirpc/1.2.6-GCCcore-9.3.0                                    x265/3.3-GCCcore-9.3.0\n   libunwind/1.3.1-GCCcore-9.3.0                                   xorg-macros/1.19.2-GCCcore-9.3.0\n   libxc/4.3.4-GCC-9.3.0                                           xorg-macros/1.19.3-GCCcore-10.3.0\n   libxml2/2.9.10-GCCcore-9.3.0                                    Xvfb/1.20.9-GCCcore-9.3.0\n   libxml2/2.9.10-GCCcore-10.3.0                                   Yasm/1.3.0-GCCcore-9.3.0\n   libyaml/0.2.2-GCCcore-9.3.0                                     ZeroMQ/4.3.2-GCCcore-9.3.0\n   LittleCMS/2.9-GCCcore-9.3.0                                     Zip/3.0-GCCcore-9.3.0\n   LLVM/9.0.1-GCCcore-9.3.0                                        zstd/1.4.4-GCCcore-9.3.0\n
    "},{"location":"repositories/pilot/#architecture-and-micro-architecture-support","title":"Architecture and micro-architecture support","text":""},{"location":"repositories/pilot/#x86_64","title":"x86_64","text":"
    • generic (currently implies -march=x86-64 and -mtune=generic)
    • AMD
      • zen2 (Rome)
      • zen3 (Milan)
    • Intel
      • haswell
      • skylake_avx512
    "},{"location":"repositories/pilot/#aarch64arm64","title":"aarch64/arm64","text":"
    • generic (currently implies -march=armv8-a and -mtune=generic)
    • AWS Graviton2
    "},{"location":"repositories/pilot/#ppc64le","title":"ppc64le","text":"
    • generic
    • power9le
    "},{"location":"repositories/pilot/#easybuild-configuration","title":"EasyBuild configuration","text":"

    EasyBuild v4.5.1 was used to install the software in the 2021.12 version of the pilot repository. For some installations, pull requests with changes that will be included in later EasyBuild versions were leveraged; see the build script that was used.

    An example configuration of the build environment based on https://github.com/EESSI/software-layer can be seen here:

    $ eb --show-config\n#\n# Current EasyBuild configuration\n# (C: command line argument, D: default value, E: environment variable, F: configuration file)\n#\nbuildpath         (E) = /tmp/eessi-build/easybuild/build\ncontainerpath     (E) = /tmp/eessi-build/easybuild/containers\ndebug             (E) = True\nfilter-deps       (E) = Autoconf, Automake, Autotools, binutils, bzip2, cURL, DBus, flex, gettext, gperf, help2man, intltool, libreadline, libtool, Lua, M4, makeinfo, ncurses, util-linux, XZ, zlib\nfilter-env-vars   (E) = LD_LIBRARY_PATH\nhooks             (E) = /home/eessi-build/software-layer/eb_hooks.py\nignore-osdeps     (E) = True\ninstallpath       (E) = /cvmfs/pilot.eessi-hpc.org/2021.06/software/linux/x86_64/intel/haswell\nmodule-extensions (E) = True\npackagepath       (E) = /tmp/eessi-build/easybuild/packages\nprefix            (E) = /tmp/eessi-build/easybuild\nrepositorypath    (E) = /tmp/eessi-build/easybuild/ebfiles_repo\nrobot-paths       (D) = /cvmfs/pilot.eessi-hpc.org/versions/2021.12/software/linux/x86_64/intel/haswell/software/EasyBuild/4.5.1/easybuild/easyconfigs\nrpath             (E) = True\nsourcepath        (E) = /tmp/eessi-build/easybuild/sources:\nsysroot           (E) = /cvmfs/pilot.eessi-hpc.org/versions/2021.12/compat/linux/x86_64\ntrace             (E) = True\nzip-logs          (E) = bzip2\n

    "},{"location":"repositories/pilot/#infrastructure-status","title":"Infrastructure status","text":"

    The status of the CernVM-FS infrastructure for the pilot repository is shown at http://status.eessi.io/pilot/.

    "},{"location":"repositories/riscv.eessi.io/","title":"EESSI RISC-V development repository (riscv.eessi.io)","text":"

    This repository contains development versions of an EESSI RISC-V software stack. Note that versions may be added, modified, or deleted at any time.

    "},{"location":"repositories/riscv.eessi.io/#accessing-the-risc-v-repository","title":"Accessing the RISC-V repository","text":"

    See Getting access; by making the EESSI CVMFS domain available, you will automatically have access to riscv.eessi.io as well.

    "},{"location":"repositories/riscv.eessi.io/#using-riscveessiio","title":"Using riscv.eessi.io","text":"

    This repository currently offers one version (20240402), and this contains both a compatibility layer and a software layer. Furthermore, initialization scripts are in place to set up the repository:

    $ source /cvmfs/riscv.eessi.io/versions/20240402/init/bash\nFound EESSI repo @ /cvmfs/riscv.eessi.io/versions/20240402!\narchdetect says riscv64/generic\nUsing riscv64/generic as software subdirectory.\nFound Lmod configuration file at /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/.lmod/lmodrc.lua\nFound Lmod SitePackage.lua file at /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/.lmod/SitePackage.lua\nUsing /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/modules/all as the directory to be added to MODULEPATH.\nInitializing Lmod...\nPrepending /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/modules/all to $MODULEPATH...\nEnvironment set up to use EESSI (20240402), have fun!\n{EESSI 20240402} $\n

    You can even source the initialization script of the software.eessi.io production repository now, and it will automatically set up the RISC-V repository for you:

    $ source /cvmfs/software.eessi.io/versions/2023.06/init/bash \nRISC-V architecture detected, but there is no RISC-V support yet in the production repository.\nAutomatically switching to version 20240402 of the RISC-V development repository /cvmfs/riscv.eessi.io.\nFor more details about this repository, see https://www.eessi.io/docs/repositories/riscv.eessi.io/.\n\nFound EESSI repo @ /cvmfs/riscv.eessi.io/versions/20240402!\narchdetect says riscv64/generic\nUsing riscv64/generic as software subdirectory.\nFound Lmod configuration file at /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/.lmod/lmodrc.lua\nFound Lmod SitePackage.lua file at /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/.lmod/SitePackage.lua\nUsing /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/modules/all as the directory to be added to MODULEPATH.\nUsing /cvmfs/riscv.eessi.io/host_injections/20240402/software/linux/riscv64/generic/modules/all as the site extension directory to be added to MODULEPATH.\nInitializing Lmod...\nPrepending /cvmfs/riscv.eessi.io/versions/20240402/software/linux/riscv64/generic/modules/all to $MODULEPATH...\nPrepending site path /cvmfs/riscv.eessi.io/host_injections/20240402/software/linux/riscv64/generic/modules/all to $MODULEPATH...\nEnvironment set up to use EESSI (20240402), have fun!\n{EESSI 20240402} $ \n

    Note that we currently only provide generic builds, hence riscv64/generic is being used for all RISC-V CPUs.

    The amount of software is constantly increasing. Besides having the foss/2023b toolchain available, applications like dlb, GROMACS, OSU Micro-Benchmarks, and R are already available as well. Use module avail to get a full and up-to-date listing of available software.

    "},{"location":"repositories/riscv.eessi.io/#infrastructure-status","title":"Infrastructure status","text":"

    The status of the CernVM-FS infrastructure for this repository is shown at https://status.eessi.io.

    "},{"location":"repositories/software.eessi.io/","title":"Production EESSI repository (software.eessi.io)","text":""},{"location":"repositories/software.eessi.io/#question-or-problems","title":"Question or problems","text":"

    If you have any questions regarding EESSI, or if you experience a problem in accessing or using it, please open a support request.

    "},{"location":"repositories/software.eessi.io/#accessing-the-eessi-repository","title":"Accessing the EESSI repository","text":"

    See Getting access.

    "},{"location":"repositories/software.eessi.io/#using-softwareeessiio","title":"Using software.eessi.io","text":"

    See Using EESSI.

    "},{"location":"repositories/software.eessi.io/#available-software","title":"Available software","text":"

    Detailed overview of available software coming soon!

    For now, use module avail after initializing the EESSI environment.
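
    A minimal sketch, assuming the 2023.06 version of the production repository:

    source /cvmfs/software.eessi.io/versions/2023.06/init/bash\nmodule avail\n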

    "},{"location":"repositories/software.eessi.io/#architecture-and-micro-architecture-support","title":"Architecture and micro-architecture support","text":"

    See CPU targets.

    "},{"location":"repositories/software.eessi.io/#infrastructure-status","title":"Infrastructure status","text":"

    The status of the CernVM-FS infrastructure for the production repository is shown at https://status.eessi.io.

    "},{"location":"software_layer/build_nodes/","title":"Build nodes","text":"

    Any system can be used as a build node to create additional software installations that should be added to the EESSI CernVM-FS repository.

    "},{"location":"software_layer/build_nodes/#requirements","title":"Requirements","text":"

    OS and software:

    • GNU/Linux (any distribution) as operating system;
    • a recent version of Singularity (>= 3.6 is recommended);
      • check with singularity --version
    • screen or tmux is highly recommended;

    Admin privileges are not required, as long as Singularity is installed.

    Resources:

    • 8 or more cores is recommended (though not strictly required);
    • at least 50GB of free space on a local filesystem (like /tmp);
    • at least 16GB of memory (2GB/core or higher recommended);

    Instructions to install Singularity and screen (click to show commands):

    CentOS 8 (x86_64 or aarch64 or ppc64le)
    sudo dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm\nsudo dnf update -y\nsudo dnf install -y screen singularity\n
    "},{"location":"software_layer/build_nodes/#setting-up-the-container","title":"Setting up the container","text":"

    Warning

    It is highly recommended to start a screen or tmux session first!

    A container image is provided that includes everything that is required to set up a writable overlay on top of the EESSI CernVM-FS repository.

    First, pick a location on a local filesystem for the temporary directory:

    Requirements:

    • Do not use a shared filesystem like NFS, Lustre or GPFS.
    • There should be at least 50GB of free disk space in this local filesystem (more is better).
    • There should be no automatic cleanup of old files via a cron job on this local filesystem.
    • Try to make sure the directory is unique (not used by anything else).

    NB. If you are going to install on a separate drive (due to lack of space on /), then you need to set some variables to point to that location. You will also need to bind mount it in the singularity command. Let's say that your drive is mounted at /srt. Then change the relevant commands below to this:

    export EESSI_TMPDIR=/srt/$USER/EESSI\nmkdir -p $EESSI_TMPDIR\nmkdir /srt/tmp\nexport SINGULARITY_BIND=\"$EESSI_TMPDIR/var-run-cvmfs:/var/run/cvmfs,$EESSI_TMPDIR/var-lib-cvmfs:/var/lib/cvmfs,/srt/tmp:/tmp\"\nsingularity shell -B /srt --fusemount \"$EESSI_READONLY\" --fusemount \"$EESSI_WRITABLE_OVERLAY\" docker://ghcr.io/eessi/build-node:debian11\n

    We will assume that /tmp/$USER/EESSI meets these requirements:

    export EESSI_TMPDIR=/tmp/$USER/EESSI\nmkdir -p $EESSI_TMPDIR\n

    Create some subdirectories in this temporary directory:

    mkdir -p $EESSI_TMPDIR/{home,overlay-upper,overlay-work}\nmkdir -p $EESSI_TMPDIR/{var-lib-cvmfs,var-run-cvmfs}\n

    Configure Singularity cache directory, bind mounts, and (fake) home directory:

    export SINGULARITY_CACHEDIR=$EESSI_TMPDIR/singularity_cache\nexport SINGULARITY_BIND=\"$EESSI_TMPDIR/var-run-cvmfs:/var/run/cvmfs,$EESSI_TMPDIR/var-lib-cvmfs:/var/lib/cvmfs\"\nexport SINGULARITY_HOME=\"$EESSI_TMPDIR/home:/home/$USER\"\n

    Define the values to pass to the --fusemount option of the singularity command:

    export EESSI_READONLY=\"container:cvmfs2 software.eessi.io /cvmfs_ro/software.eessi.io\"\nexport EESSI_WRITABLE_OVERLAY=\"container:fuse-overlayfs -o lowerdir=/cvmfs_ro/software.eessi.io -o upperdir=$EESSI_TMPDIR/overlay-upper -o workdir=$EESSI_TMPDIR/overlay-work /cvmfs/software.eessi.io\"\n

    Start the container (which includes Debian 11, CernVM-FS and fuse-overlayfs):

    singularity shell --fusemount \"$EESSI_READONLY\" --fusemount \"$EESSI_WRITABLE_OVERLAY\" docker://ghcr.io/eessi/build-node:debian11\n

    Once the container image has been downloaded and converted to a Singularity image (SIF format), you should get a prompt like this:

    ...\nCernVM-FS: loading Fuse module... done\n\nSingularity>\n

    and the EESSI CernVM-FS repository should be mounted:

    Singularity> ls /cvmfs/software.eessi.io\nhost_injections  README.eessi  versions\n
    "},{"location":"software_layer/build_nodes/#setting-up-the-environment","title":"Setting up the environment","text":"

    Set up the environment by starting a Gentoo Prefix session using the startprefix command.

    Make sure you use the correct version of the EESSI repository!

    export EESSI_VERSION='2023.06' \n/cvmfs/software.eessi.io/versions/${EESSI_VERSION}/compat/linux/$(uname -m)/startprefix\n
    "},{"location":"software_layer/build_nodes/#installing-software","title":"Installing software","text":"

    Clone the software-layer repository:

    git clone https://github.com/EESSI/software-layer.git\n

    Run the software installation script in software-layer:

    cd software-layer\n./EESSI-install-software.sh\n

    This script will figure out the CPU microarchitecture of the host automatically (like x86_64/intel/haswell).

    To build generic software installations (like x86_64/generic), use the --generic option:

    ./EESSI-install-software.sh --generic\n

    Once all missing software has been installed, you should see a message like this:

    No missing modules!\n
    "},{"location":"software_layer/build_nodes/#creating-tarball-to-ingest","title":"Creating tarball to ingest","text":"

    Before tearing down the build node, you should create a tarball to ingest into the EESSI CernVM-FS repository.

    To create a tarball of all installations, assuming your build host is x86_64/intel/haswell:

    export EESSI_VERSION='2023.06'\ncd /cvmfs/software.eessi.io/versions/${EESSI_VERSION}/software/linux\neessi_tar_gz=\"$HOME/eessi-${EESSI_VERSION}-haswell.tar.gz\"\ntar cvfz ${eessi_tar_gz} x86_64/intel/haswell\n

    To create a tarball for specific installations, make sure you pick up both the software installation directories and the corresponding module files:

    eessi_tar_gz=\"$HOME/eessi-${EESSI_VERSION}-haswell-OpenFOAM.tar.gz\"\n\ntar cvfz ${eessi_tar_gz} x86_64/intel/haswell/software/OpenFOAM modules/all//OpenFOAM\n

    This tarball should be uploaded to the Stratum 0 server for ingestion. If needed, you can ask for help in the EESSI #software-layer Slack channel.

    "},{"location":"software_layer/cpu_targets/","title":"CPU targets","text":"

    In the 2023.06 version of the EESSI repository, the following CPU microarchitectures are supported.

    • aarch64/generic: fallback for Arm 64-bit CPUs (like Raspberry Pi, etc.)
    • aarch64/neoverse_n1: AWS Graviton 2, Ampere Altra, ...
    • aarch64/neoverse_v1: AWS Graviton 3
    • x86_64/generic: fallback for older Intel + AMD CPUs (like Intel Sandy Bridge, ...)
    • x86_64/amd/zen2: AMD Rome
    • x86_64/amd/zen3: AMD Milan, AMD Milan X
    • x86_64/intel/haswell: Intel Haswell, Broadwell
    • x86_64/intel/skylake_avx512: Intel Skylake, Cascade Lake, Ice Lake, ...

    The names of these CPU targets correspond to the names used by archspec.
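
    If you want to see which microarchitecture archspec detects for your host, you can use its command-line tool (this is optional and not needed to use EESSI; note that it prints only the microarchitecture name, e.g. zen2, without the vendor prefix used in EESSI target names):

    python3 -m pip install --user archspec\narchspec cpu\n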

    "},{"location":"talks/20230615_aws_tech_short/","title":"Making scientific software EESSI - and fast","text":"

    AWS HPC Tech Short (~8 min.) - 15 June 2023

    "},{"location":"talks/2023/20230615_aws_tech_short/","title":"Making scientific software EESSI - and fast","text":"

    AWS HPC Tech Short (~8 min.) - 15 June 2023

    "},{"location":"talks/2023/20231027_packagingcon23_eessi/","title":"Streaming optimized scientific software installations on any Linux distro with EESSI","text":"
    • PackagingCon'2023 (Berlin, Germany) - 27 Oct 2023
    • presented by Kenneth Hoste & Lara Peeters (HPC-UGent)
    • slides (PDF)
    "},{"location":"talks/2023/20231204_cvmfs_hpc/","title":"Best Practices for CernVM-FS in HPC","text":"
    • online tutorial (~3h15min), 4 Dec 2023
    • presented by Kenneth Hoste (HPC-UGent)
    • tutorial website: https://multixscale.github.io/cvmfs-tutorial-hpc-best-practices
    • slides (PDF)
    "},{"location":"talks/2023/20231205_castiel2_eessi_intro/","title":"Streaming Optimised Scientific Software: an Introduction to EESSI","text":"
    • online tutorial (~1h40min) - 5 Dec 2023
    • presented by Alan O'Cais (CECAM)
    • slides (PDF)
    "},{"location":"test-suite/","title":"EESSI test suite","text":"

    The EESSI test suite is a collection of tests that are run using ReFrame. It is used to check whether the software installations included in the EESSI software layer are working and performing as expected.

    To get started, you should look into the installation and configuration guidelines first.

    To write the ReFrame configuration file for your system, check ReFrame configuration file.

    To see which software tests are available, see available-tests.md.

    For more information on using the EESSI test suite, see here.

    See also release notes for the EESSI test suite.

    "},{"location":"test-suite/ReFrame-configuration-file/","title":"ReFrame configuration file","text":"

    In order for ReFrame to run tests on your system, it needs to know some properties about your system. For example, it needs to know what kind of job scheduler you have, which partitions the system has, how to submit to those partitions, etc. All of this has to be described in a ReFrame configuration file (see also the section on $RFM_CONFIG_FILES above).

    This page is organized as follows:

    • available ReFrame configuration file
    • Verifying your ReFrame configuration
    • How to write a ReFrame configuration file
    "},{"location":"test-suite/ReFrame-configuration-file/#available-reframe-configuration-file","title":"Available ReFrame configuration file","text":"

    Some ReFrame configuration files for HPC systems and public clouds are available in the config directory for inspiration. Below is a simple ReFrame configuration file with the minimal changes required to get you started with the test suite on a CPU partition. Please check that stagedir is set to a path on a (shared) scratch filesystem for storing (temporary) files related to the tests, and that access is set to a list of arguments that you would normally pass to the scheduler when submitting to this partition (for example '-p cpu' for submitting to a Slurm partition called cpu).

    To write a ReFrame configuration file for your system, check the section How to write a ReFrame configuration file.

    \"\"\"\nsimple ReFrame configuration file\n\"\"\"\nimport os\n\nfrom eessi.testsuite.common_config import common_logging_config, common_eessi_init, format_perfvars, perflog_format\nfrom eessi.testsuite.constants import *  \n\nsite_configuration = {\n    'systems': [\n        {\n            'name': 'cpu_partition',\n            'descr': 'CPU partition',\n            'modules_system': 'lmod',\n            'hostnames': ['*'],\n            # Note that the stagedir should be a shared directory available on all nodes running ReFrame tests\n            'stagedir': f'/some/shared/dir/{os.environ.get(\"USER\")}/reframe_output/staging',\n            'partitions': [\n                {\n                    'name': 'cpu_partition',\n                    'descr': 'CPU partition',\n                    'scheduler': 'slurm',\n                    'launcher': 'mpirun',\n                    'access':  ['-p cpu', '--export=None'],\n                    'prepare_cmds': ['source %s' % common_eessi_init()],\n                    'environs': ['default'],\n                    'max_jobs': 4,\n                    'resources': [\n                        {\n                            'name': 'memory',\n                            'options': ['--mem={size}'],\n                        }\n                    ],\n                    'features': [\n                        FEATURES[CPU]\n                    ] + list(SCALES.keys()),\n                }\n            ]\n        },\n    ],\n    'environments': [\n        {\n            'name': 'default',\n            'cc': 'cc',\n            'cxx': '',\n            'ftn': '',\n        },\n    ],\n    'logging': common_logging_config(),\n    'general': [\n        {\n            # Enable automatic detection of CPU architecture for each partition\n            # See https://reframe-hpc.readthedocs.io/en/stable/configure.html#auto-detecting-processor-information\n            'remote_detect': True,\n        }\n    ],\n}\n\n# optional logging to syslog\nsite_configuration['logging'][0]['handlers_perflog'].append({\n    'type': 'syslog',\n    'address': '/dev/log',\n    'level': 'info',\n    'format': f'reframe: {perflog_format}',\n    'format_perfvars': format_perfvars,\n    'append': True,\n})\n
    "},{"location":"test-suite/ReFrame-configuration-file/#verifying-your-reframe-configuration","title":"Verifying your ReFrame configuration","text":"

    To verify the ReFrame configuration, you can query the configuration using --show-config.

    To see the full configuration, use:

    reframe --show-config\n

    To only show the configuration of a particular system partition, you can use the --system option. To query a specific setting, you can pass an argument to --show-config.

    For example, to show the configuration of the gpu partition of the example system:

    reframe --system example:gpu --show-config systems/0/partitions\n

    You can drill it down further to only show the value of a particular configuration setting.

    For example, to only show the launcher value for the gpu partition of the example system:

    reframe --system example:gpu --show-config systems/0/partitions/@gpu/launcher\n
    "},{"location":"test-suite/ReFrame-configuration-file/#how-to-write-a-reframe-configuration-file","title":"How to write a ReFrame configuration file","text":"

    The official ReFrame documentation provides the full description of how to configure ReFrame for your site. However, there are some configuration settings that are specifically required for the EESSI test suite. Also, there is a large number of configuration settings available in ReFrame, which can make the official documentation a bit overwhelming.

    Here, we will describe how to create a configuration file that works with the EESSI test suite, starting from an example configuration file settings_example.py, which defines the most common configuration settings.

    "},{"location":"test-suite/ReFrame-configuration-file/#python-imports","title":"Python imports","text":"

    The EESSI test suite standardizes a few string-based values as constants, as well as the logging format used by ReFrame. Every ReFrame configuration file used for running the EESSI test suite should therefore start with the following import statements:

    from eessi.testsuite.common_config import common_logging_config, common_eessi_init\nfrom eessi.testsuite.constants import *\n
    "},{"location":"test-suite/ReFrame-configuration-file/#high-level-system-info-systems","title":"High-level system info (systems)","text":"

    First, we describe the system at its highest level through the systems keyword.

    You can define multiple systems in a single configuration file (systems is a Python list value). We recommend defining just a single system in each configuration file, as it makes the configuration file a bit easier to digest (for humans).

    An example of the systems section of the configuration file would be:

    site_configuration = {\n    'systems': [\n    # We could list multiple systems. Here, we just define one\n        {\n            'name': 'example',\n            'descr': 'Example cluster',\n            'modules_system': 'lmod',\n            'hostnames': ['*'],\n            'stagedir': f'/some/shared/dir/{os.environ.get(\"USER\")}/reframe_output/staging',\n            'partitions': [...],\n        }\n    ]\n}\n

    The most common configuration items defined at this level are:

    • name: The name of the system. Pick whatever makes sense for you.
    • descr: Description of the system. Again, pick whatever you like.
    • modules_system: The modules system used on your system. EESSI provides modules in lmod format. There is no need to change this, unless you want to run tests from the EESSI test suite with non-EESSI modules.
    • hostnames: The names of the hosts on which you will run the ReFrame command, as regular expression. Using these names, ReFrame can automatically determine which of the listed configurations in the systems list to use, which is useful if you're defining multiple systems in a single configuration file. If you follow our recommendation to limit yourself to one system per configuration file, simply define 'hostnames': ['*'].
    • prefix: Prefix directory for a ReFrame run on this system. Any directories or files produced by ReFrame will use this prefix, if not specified otherwise. We recommend setting the $RFM_PREFIX environment variable rather than specifying prefix in your configuration file, so our common logging configuration can pick up on it (see also $RFM_PREFIX).
    • stagedir: A shared directory that is available on all nodes that will execute ReFrame tests. This is used for storing (temporary) files related to the test. Typically, you want to set this to a path on a (shared) scratch filesystem. Defining this is optional: the default is a 'stage' directory inside the prefix directory.
    • partitions: Details on system partitions, see below.
    "},{"location":"test-suite/ReFrame-configuration-file/#partitions","title":"System partitions (systems.partitions)","text":"

    The next step is to add the system partitions to the configuration files, which is also specified as a Python list since a system can have multiple partitions.

    The partitions section of the configuration for a system with two Slurm partitions (one CPU partition, and one GPU partition) could for example look something like this:

    site_configuration = {\n    'systems': [\n        {\n            ...\n            'partitions': [\n                {\n                    'name': 'cpu_partition',\n                    'descr': 'CPU partition',\n                    'scheduler': 'slurm',\n                    'prepare_cmds': ['source %s' % common_eessi_init()],\n                    'launcher': 'mpirun',\n                    'access':  ['-p cpu'],\n                    'environs': ['default'],\n                    'max_jobs': 4,\n                    'features': [\n                        FEATURES[CPU]\n                    ] + list(SCALES.keys()),\n                },\n                {\n                    'name': 'gpu_partition',\n                    'descr': 'GPU partition',\n                    'scheduler': 'slurm',\n                    'prepare_cmds': ['source %s' % common_eessi_init()],\n                    'launcher': 'mpirun',\n                    'access':  ['-p gpu'],\n                    'environs': ['default'],\n                    'max_jobs': 4,\n                    'resources': [\n                        {\n                            'name': '_rfm_gpu',\n                            'options': ['--gpus-per-node={num_gpus_per_node}'],\n                        }\n                    ],\n                    'devices': [\n                        {\n                            'type': DEVICE_TYPES[GPU],\n                            'num_devices': 4,\n                        }\n                    ],\n                    'features': [\n                        FEATURES[CPU],\n                        FEATURES[GPU],\n                    ],\n                    'extras': {\n                        GPU_VENDOR: GPU_VENDORS[NVIDIA],\n                    },\n                },\n            ]\n        }\n    ]\n}\n

    The most common configuration items defined at this level are:

    • name: The name of the partition. Pick anything you like.
    • descr: Description of the partition. Again, pick whatever you like.
    • scheduler: The scheduler used to submit to this partition, for example slurm. All valid options can be found in the ReFrame documentation.
    • launcher: The parallel launcher used on this partition, for example mpirun or srun. All valid options can be found in the ReFrame documentation.
    • access: A list of arguments that you would normally pass to the scheduler when submitting to this partition (for example '-p cpu' for submitting to a Slurm partition called cpu). If supported by your scheduler, we recommend not exporting the submission environment (for example by using '--export=None' with Slurm). This avoids test failures due to environment variables set in the submission environment that are passed down to submitted jobs.
    • prepare_cmds: Commands to execute at the start of every job that runs a test. If your batch scheduler does not export the environment of the submit host, this is typically where you can initialize the EESSI environment.
    • environs: The names of the programming environments (to be defined later in the configuration file via environments) that may be used on this partition. A programming environment is required for tests that are compiled first, before they can run. The EESSI test suite however only tests existing software installations, so no compilation (or specific programming environment) is needed. Simply specify 'environs': ['default'], since ReFrame requires that a default environment is defined.
    • max_jobs: The maximum amount of jobs ReFrame is allowed to submit in parallel. Some batch systems limit how many jobs users are allowed to have in the queue. You can use this to make sure ReFrame doesn't exceed that limit.
    • resources: This field defines how additional resources can be requested in a batch job. Specifically, on a GPU partition, you have to define a resource with the name '_rfm_gpu'. The options field should then contain the argument to be passed to the batch scheduler in order to request a certain number of GPUs per node, which could be different for different batch schedulers. For example, when using Slurm you would specify:
      'resources': [\n  {\n      'name': '_rfm_gpu',\n      'options': ['--gpus-per-node={num_gpus_per_node}'],\n  },\n],\n
    • processor: We recommend NOT defining this field, unless CPU autodetection is not working for you. The EESSI test suite relies on information about your processor topology to run. Using CPU autodetection is the easiest way to ensure that all processor-related information needed by the EESSI test suite is defined. Only if CPU autodetection is failing for you do we advise you to set the processor in the partition configuration as an alternative. Although additional fields might be used by future EESSI tests, at this point you'll have to specify at least the following fields:
      'processor': {\n    'num_cpus': 64,  # Total number of CPU cores in a node\n    'num_sockets': 2,  # Number of sockets in a node\n    'num_cpus_per_socket': 32,  # Number of CPU cores per socket\n    'num_cpus_per_core': 1,  # Number of hardware threads per CPU core\n}                 \n
    • features: The features field is used by the EESSI test suite to run tests on a partition only if it supports a certain feature (for example if GPUs are available). Feature names are standardized in the EESSI test suite in the eessi.testsuite.constants.FEATURES dictionary. Typically, you want to define features: [FEATURES[CPU]] + list(SCALES.keys()) for CPU-based partitions, and features: [FEATURES[GPU]] + list(SCALES.keys()) for GPU-based partitions. The first tells the EESSI test suite that this partition can only run CPU-based tests, whereas the second indicates that this partition can only run GPU-based tests. You can define a single partition to have both the CPU and GPU features (since features is a Python list). However, since the CPU-based tests will not ask your batch scheduler for GPU resources, this may fail on batch systems that force you to ask for at least one GPU on GPU-based nodes. Also, running CPU-only code on a GPU node is typically considered bad practice, so testing its functionality is typically not relevant. The list(SCALES.keys()) adds all the scales that may be used by EESSI tests to the features list. These scales are defined in eessi.testsuite.constants.SCALES and define at which scales tests should be run, e.g. single core, half a node, a full node, two nodes, etc. This can be used to exclude running at certain scales on systems that do not support it. E.g. some systems might not support requesting multiple partial nodes, which is what the 1_cpn_2_nodes (1 core per node, on two nodes) and 1_cpn_4_nodes scales do. One could exclude these by setting e.g. features: [FEATURES[CPU]] + [s for s in SCALES if s not in ['1_cpn_2_nodes', '1_cpn_4_nodes']]. With this configuration setting, ReFrame will run all the scales listed in eessi.testsuite.constants.SCALES except those two. In a similar way, one could exclude all multinode tests if one just has a single node available.
    • devices: This field specifies information on devices (for example GPUs) present in the partition. Device types are standardized in the EESSI test suite in the eessi.testsuite.constants.DEVICE_TYPES dictionary. This is used by the EESSI test suite to determine how many of these devices it can/should use per node. Typically, there is no need to define devices for CPU partitions. For GPU partitions, you want to define something like:
    'devices': [\n    {\n        'type': DEVICE_TYPES[GPU],\n        'num_devices': 4,  # or however many GPUs you have per node\n    }\n],\n
    • extras: This field specifies extra information on the partition, such as the GPU vendor. Valid fields for extras are standardized as constants in eessi.testsuite.constants (for example GPU_VENDOR). This is used by the EESSI test suite to decide if a partition can run a test that specifically requires a certain brand of GPU. Typically, there is no need to define extras for CPU partitions. For GPU partitions, you typically want to specify the GPU vendor, for example:
      'extras': {\n    GPU_VENDOR: GPU_VENDORS[NVIDIA]\n}\n

    Note that as more tests are added to the EESSI test suite, the use of features, devices and extras by the EESSI test suite may be extended, which may require an update of your configuration file to define newly recognized fields.

    Note

    Keep in mind that ReFrame partitions are virtual entities: they may or may not correspond to a partition as it is configured in your batch system. One might for example have a single partition in the batch system, but configure it as two separate partitions in the ReFrame configuration file based on additional constraints that are passed to the scheduler, see for example the AWS CitC example configuration.

    The EESSI test suite (and more generally, ReFrame) assumes the hardware within a partition defined in the ReFrame configuration file is homogeneous.

    "},{"location":"test-suite/ReFrame-configuration-file/#environments","title":"Environments","text":"

    ReFrame needs a programming environment to be defined in its configuration file for tests that need to be compiled before they are run. While we don't have such tests in the EESSI test suite, ReFrame requires some programming environment to be defined:

    site_configuration = {\n    ...\n    'environments': [\n        {\n            'name': 'default',  # Note: needs to match whatever we set for 'environs' in the partition\n            'cc': 'cc',\n            'cxx': '',\n            'ftn': '',\n        }\n    ]\n}\n

    Note

    The name here needs to match whatever we specified for the environs property of the partitions.

    "},{"location":"test-suite/ReFrame-configuration-file/#logging","title":"Logging","text":"

    ReFrame allows a large degree of control over what gets logged, and where. For convenience, we have created a common logging configuration in eessi.testsuite.common_config that provides a reasonable default. It can be used by importing common_logging_config and calling it as a function to define the logging setting:

    from eessi.testsuite.common_config import common_logging_config\n\nsite_configuration = {\n    ...\n    'logging':  common_logging_config(),\n}\n
    When combined with setting the $RFM_PREFIX environment variable (which we recommend), the output, performance logs, and regular ReFrame logs will all end up in the directory specified by $RFM_PREFIX.

    Alternatively, a prefix can be passed as an argument like common_logging_config(prefix), which will control where the regular ReFrame log ends up. Note that the performance logs do not respect this prefix: they will still end up in the standard ReFrame prefix (by default the current directory, unless otherwise set with $RFM_PREFIX or --prefix).
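
    A minimal sketch of passing such a prefix (the path used here is just an illustration):

    import os\n\nfrom eessi.testsuite.common_config import common_logging_config\n\nsite_configuration = {\n    ...\n    'logging': common_logging_config(f'/scratch/{os.environ.get(\"USER\")}/reframe_logs'),\n}\n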

    "},{"location":"test-suite/ReFrame-configuration-file/#cpu-auto-detection","title":"Auto-detection of processor information","text":"

    You can let ReFrame auto-detect the processor information for your system.

    ReFrame will automatically use auto-detection when two conditions are met:

    1. The partitions section of your configuration file does not specify processor information for a particular partition (as per our recommendation in the previous section);
    2. The remote_detect option is enabled in the general part of the configuration, as follows:
      site_configuration = {\n    'systems': ...\n    'logging': ...\n    'general': [\n        {\n            'remote_detect': True,\n        }\n    ]\n}\n

    To trigger the auto-detection of processor information, it is sufficient to let ReFrame list the available tests:

    reframe --list\n

    ReFrame will store the processor information for your system in ~/.reframe/topology/<system>-<partition>/processor.json.

    "},{"location":"test-suite/available-tests/","title":"Available tests","text":"

    The EESSI test suite currently includes tests for:

    • GROMACS
    • TensorFlow
    • OSU Micro-Benchmarks

    For a complete overview of all available tests in the EESSI test suite, see the eessi/testsuite/tests subdirectory in the EESSI/test-suite GitHub repository.

    "},{"location":"test-suite/available-tests/#gromacs","title":"GROMACS","text":"

    Several tests for GROMACS, a software package to perform molecular dynamics simulations, are included, which use the systems included in the HECBioSim benchmark suite:

    • Crambin (20K atom system)
    • Glutamine-Binding-Protein (61K atom system)
    • hEGFRDimer (465K atom system)
    • hEGFRDimerSmallerPL (465K atom system, only 10k steps)
    • hEGFRDimerPair (1.4M atom system)
    • hEGFRtetramerPair (3M atom system)

    It is implemented in tests/apps/gromacs.py, on top of the GROMACS test that is included in the ReFrame test library hpctestlib.

    To run this GROMACS test with all HECBioSim systems, use:

    reframe --run --name GROMACS\n

    To run this GROMACS test only for a specific HECBioSim system, use for example:

    reframe --run --name 'GROMACS.*HECBioSim/hEGFRDimerPair'\n

    To run this GROMACS test with the smallest HECBioSim system (Crambin), you can use the CI tag:

    reframe --run --name GROMACS --tag CI\n
    "},{"location":"test-suite/available-tests/#tensorflow","title":"TensorFlow","text":"

    A test for TensorFlow, a machine learning framework, is included, which is based on the \"Multi-worker training with Keras\" TensorFlow tutorial.

    It is implemented in tests/apps/tensorflow/.

    To run this TensorFlow test, use:

    reframe --run --name TensorFlow\n

    Warning

    This test requires TensorFlow v2.11 or newer; using an older TensorFlow version will not work!

    "},{"location":"test-suite/available-tests/#osumicrobenchmarks","title":"OSU Micro-Benchmarks","text":"

    A test for OSU Micro-Benchmarks, which provides an MPI benchmark.

    It is implemented in tests/apps/osu.py.

    To run this OSU Micro-Benchmarks test, use:

    reframe --run --name OSU-Micro-Benchmarks\n

    Warning

    This test requires OSU Micro-Benchmarks v5.9 or newer; using an older OSU Micro-Benchmarks version will not work!

    "},{"location":"test-suite/installation-configuration/","title":"Installing and configuring the EESSI test suite","text":"

    This page covers the requirements, installation and configuration of the EESSI test suite.

    "},{"location":"test-suite/installation-configuration/#requirements","title":"Requirements","text":"

    The EESSI test suite requires

    • Python >= 3.6
    • ReFrame v4.3.3 (or newer)
    • ReFrame test library (hpctestlib)
    "},{"location":"test-suite/installation-configuration/#installing-reframe","title":"Installing Reframe","text":"

    General instructions for installing ReFrame are available in the ReFrame documentation. To check if ReFrame is available, run the reframe command:

    reframe --version\n
    (for more details on the ReFrame version requirement, click here)

    Two important bugs were resolved in ReFrame's CPU autodetect functionality in version 4.3.3.

    We strongly recommend you use ReFrame >= 4.3.3.

    If you are using an older version of ReFrame, you may encounter some issues:

    • ReFrame will try to use the parallel launcher command configured for each partition (e.g. mpirun) when doing the remote autodetect. If there is no system-version of mpirun available, that will fail (see ReFrame issue #2926).
    • CPU autodetection only worked when using a clone of the ReFrame repository, not when it was installed with pip or EasyBuild (as is also the case for the ReFrame shipped with EESSI) (see ReFrame issue #2914).
    "},{"location":"test-suite/installation-configuration/#installing-reframe-test-library-hpctestlib","title":"Installing ReFrame test library (hpctestlib)","text":"

    The EESSI test suite requires that the ReFrame test library (hpctestlib) is available, which is currently not included in a standard installation of ReFrame.

    We recommend installing ReFrame using EasyBuild (version 4.8.1, or newer), or using a ReFrame installation that is available in the EESSI repository (version 2023.06, or newer).

    For example (using EESSI):

    source /cvmfs/software.eessi.io/versions/2023.06/init/bash\nmodule load ReFrame/4.3.3\n

    To check whether the ReFrame test library is available, try importing a submodule of the hpctestlib Python package:

    python3 -c 'import hpctestlib.sciapps.gromacs'\n
    "},{"location":"test-suite/installation-configuration/#installation","title":"Installation","text":"

    To install the EESSI test suite, you can either use pip or clone the GitHub repository directly:

    "},{"location":"test-suite/installation-configuration/#pip-install","title":"Using pip","text":"
    pip install git+https://github.com/EESSI/test-suite.git\n
    "},{"location":"test-suite/installation-configuration/#cloning-the-repository","title":"Cloning the repository","text":"
    git clone https://github.com/EESSI/test-suite $HOME/EESSI-test-suite\ncd EESSI-test-suite\nexport PYTHONPATH=$PWD:$PYTHONPATH\n
    "},{"location":"test-suite/installation-configuration/#verify-installation","title":"Verify installation","text":"

    To check whether the EESSI test suite installed correctly, try importing the eessi.testsuite Python package:

    python3 -c 'import eessi.testsuite'\n
    "},{"location":"test-suite/installation-configuration/#configuration","title":"Configuration","text":"

    Before you can run the EESSI test suite, you need to create a configuration file for ReFrame that is specific to the system on which the tests will be run.

    Example configuration files are available in the config subdirectory of the EESSI/test-suite GitHub repository (https://github.com/EESSI/test-suite/tree/main/config), which you can use as a template to create your own.
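
    For example, assuming you cloned the test suite to $HOME/EESSI-test-suite (as in the installation instructions above), you could start from one of the example files like this (the filenames example.py and my_system.py are just illustrations):

    mkdir -p $HOME/reframe_config\ncp $HOME/EESSI-test-suite/config/example.py $HOME/reframe_config/my_system.py\nexport RFM_CONFIG_FILES=$HOME/reframe_config/my_system.py\n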

    "},{"location":"test-suite/installation-configuration/#configuring-reframe-environment-variables","title":"Configuring ReFrame environment variables","text":"

    We recommend setting a couple of $RFM_* environment variables to configure ReFrame, to avoid needing to include particular options to the reframe command over and over again.

    "},{"location":"test-suite/installation-configuration/#RFM_CONFIG_FILES","title":"ReFrame configuration file ($RFM_CONFIG_FILES)","text":"

    (see also RFM_CONFIG_FILES in ReFrame docs)

    Define the $RFM_CONFIG_FILES environment variable to instruct ReFrame which configuration file to use, for example:

    export RFM_CONFIG_FILES=$HOME/EESSI-test-suite/config/example.py\n

    Alternatively, you can use the --config-file (or -C) reframe option.

    See the section on the ReFrame configuration file below for more information.

    "},{"location":"test-suite/installation-configuration/#search-path-for-tests-rfm_check_search_path","title":"Search path for tests ($RFM_CHECK_SEARCH_PATH)","text":"

    (see also RFM_CHECK_SEARCH_PATH in ReFrame docs)

    Define the $RFM_CHECK_SEARCH_PATH environment variable to tell ReFrame which directory to search for tests.

    In addition, define $RFM_CHECK_SEARCH_RECURSIVE to ensure that ReFrame searches $RFM_CHECK_SEARCH_PATH recursively (i.e. so that also tests in subdirectories are found).

    For example:

    export RFM_CHECK_SEARCH_PATH=$HOME/EESSI-test-suite/eessi/testsuite/tests\nexport RFM_CHECK_SEARCH_RECURSIVE=1\n

    Alternatively, you can use the --checkpath (or -c) and --recursive (or -R) reframe options.

    "},{"location":"test-suite/installation-configuration/#RFM_PREFIX","title":"ReFrame prefix ($RFM_PREFIX)","text":"

    (see also RFM_PREFIX in ReFrame docs)

    Define the $RFM_PREFIX environment variable to tell ReFrame where to store the files it produces. E.g.

    export RFM_PREFIX=$HOME/reframe_runs\n

    This involves:

    • test output directories (which contain e.g. the job script, stderr and stdout for each of the test jobs)
    • staging directories (unless otherwise specified via stagedir, see below);
    • performance logs;

    Note that, by default, ReFrame uses the current directory as prefix. We recommend setting a prefix so that logs are not scattered around and are neatly appended for each run.

    If our common logging configuration is used, the regular ReFrame log file will also end up in the location specified by $RFM_PREFIX.

    Warning

    Using the --prefix option in your reframe command is not equivalent to setting $RFM_PREFIX, since our common logging configuration only picks up on the $RFM_PREFIX environment variable to determine the location for the ReFrame log file.

    "},{"location":"test-suite/release-notes/","title":"Release notes for EESSI test suite","text":""},{"location":"test-suite/release-notes/#020-7-march-2024","title":"0.2.0 (7 march 2024)","text":"

    This is a minor release of the EESSI test suite.

    It includes:

    • Implement the CI for regular runs on a system (#93)
    • Add OSU tests and update the hooks and configs to make the tests portable (#54, #95, #96, #97, #110, #116, #117, #118, #121)
    • Add extra scales to filter tests (#94)
    • add new hook to filter out invalid scales based on features in the config (#111)
    • unify test names (#108)
    • Updates to CI workflow (#102, #103, #104, #105)
    • Update common_config (#114)
    • Add common config item to redirect the report file to the same directory as e.g. the perflog (#122)
    • Fix code formatting + enforce it in CI workflow (#120)

    Bug fixes:

    • Fix hook _assign_num_tasks_per_node (#98)
    • fix import common-config vsc_hortense (#99)
    • fix typo in partition names in configuration file for vsc_hortense (#106)
    "},{"location":"test-suite/release-notes/#010-5-october-2023","title":"0.1.0 (5 October 2023)","text":"

    Version 0.1.0 is the first release of the EESSI test suite.

    It includes:

    • A well-structured eessi.testsuite Python package that provides constants, utilities, hooks, and tests, which can be installed with \"pip install\".
    • Tests for GROMACS and TensorFlow in eessi.testsuite.tests.apps that leverage the functionality provided by eessi.testsuite.*.
    • Examples of ReFrame configuration files for various systems in the config subdirectory.
    • A common_logging_config() function to facilitate the ReFrame logging configuration.
    • A set of standard device types and features that can be used in the partitions section of the ReFrame configuration file.
    • A set of tags (CI + scale) that can be used to filter checks.
    • Scripts that show how to run the test suite.
    "},{"location":"test-suite/usage/","title":"Using the EESSI test suite","text":"

    This page covers the usage of the EESSI test suite.

    We assume you have already installed and configured the EESSI test suite on your system.

    "},{"location":"test-suite/usage/#listing-available-tests","title":"Listing available tests","text":"

    To list the tests that are available in the EESSI test suite, use reframe --list (or reframe -L for short).

    If you have properly configured ReFrame, you should see a (potentially long) list of checks in the output:

    $ reframe --list\n...\n[List of matched checks]\n- ...\nFound 123 check(s)\n

    Note

    When using --list, checks are only generated based on modules that are available in the system where the reframe command is invoked.

    The system partitions specified in your ReFrame configuration file are not taken into account when using --list.

    So, if --list produces an overview of 50 checks, and you have 4 system partitions in your configuration file, actually running the test suite may result in (up to) 200 checks being executed.

    "},{"location":"test-suite/usage/#dry-run","title":"Performing a dry run","text":"

    To perform a dry run of the EESSI test suite, use reframe --dry-run:

    $ reframe --dry-run\n...\n[==========] Running 1234 check(s)\n\n[----------] start processing checks\n[ DRY      ] GROMACS_EESSI ...\n...\n[----------] all spawned checks have finished\n\n[  PASSED  ] Ran 1234/1234 test case(s) from 1234 check(s) (0 failure(s), 0 skipped, 0 aborted)\n

    Note

    When using --dry-run, the system partitions listed in your ReFrame configuration file are also taken into account when generating checks, next to available modules and test parameters, which is not the case when using --list.

    "},{"location":"test-suite/usage/#running-the-full-test-suite","title":"Running the (full) test suite","text":"

    To actually run the (full) EESSI test suite and let ReFrame produce a performance report, use reframe --run --performance-report.

    We strongly recommend filtering the checks that will be run by using additional options like --system, --name, --tag (see the 'Filtering tests' section below), and doing a dry run first to make sure that the generated checks correspond to what you have in mind.
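
    For example, a possible workflow that reuses the example system name and the CI tag from the sections below is to do a dry run first, and only then run for real:

    reframe --dry-run --system example:cpu --name GROMACS --tag CI\nreframe --run --performance-report --system example:cpu --name GROMACS --tag CI\n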

    "},{"location":"test-suite/usage/#reframe-output-and-log-files","title":"ReFrame output and log files","text":"

    ReFrame will generate various output and log files:

    • a general ReFrame log file with debug logging on the ReFrame run (incl. selection of tests, generating checks, test results, etc.);
    • stage directories for each generated check, in which the checks are run;
    • output directories for each generated check, which include the test output;
    • performance log files for each test, which include performance results for the test runs;

    We strongly recommend controlling where these files go by using the common logging configuration that is provided by the EESSI test suite in your ReFrame configuration file and setting $RFM_PREFIX (avoid using the command line option --prefix).

    If you do, and if you use ReFrame v4.3.3 or newer, you should find the output and log files at:

    • general ReFrame log file at $RFM_PREFIX/logs/reframe_<datestamp>_<timestamp>.log;
    • stage directories in $RFM_PREFIX/stage/<system>/<partition>/<environment>/;
    • output directories in $RFM_PREFIX/output/<system>/<partition>/<environment>/;
    • performance log files in $RFM_PREFIX/perflogs/<system>/<partition>/<environment>/;

    In the stage and output directories, there will be a subdirectory for each check that was run, which are tagged with a unique hash (like d3adb33f) that is determined based on the specific parameters for that check (see the ReFrame documentation for more details on the test naming scheme).

    "},{"location":"test-suite/usage/#filtering-tests","title":"Filtering tests","text":"

    By default, ReFrame will automatically generate checks for each system partition, based on the tests available in the EESSI test suite, available software modules, and tags defined in the EESSI test suite.

    To avoid being overwhelmed by checks, it is recommended to apply filters so that ReFrame only generates the checks you are interested in.

    "},{"location":"test-suite/usage/#filter-name","title":"Filtering by test name","text":"

    You can filter checks based on the full test name using the --name option (or -n), which includes the value for all test parameters.

    Here's an example of a full test name:

    GROMACS_EESSI %benchmark_info=HECBioSim/Crambin %nb_impl=cpu %scale=1_node %module_name=GROMACS/2023.1-foss-2022a /d3adb33f @example:gpu+default\n

    To let ReFrame only generate checks for GROMACS, you can use:

    reframe --name GROMACS\n

    To only run GROMACS checks with a particular version of GROMACS, you can use --name to only retain specific GROMACS modules:

    reframe --name %module_name=GROMACS/2023.1\n

    Likewise, you can filter on any part of the test name.

    You can also select one specific check using the corresponding test hash, which is also part of the full test name (see /d3adb33f in the example above): for example:

    reframe --name /d3adb33f\n

    The argument passed to --name is interpreted as a Python regular expression, so you can use wildcards like .*, character ranges like [0-9], use ^ to specify that the pattern should match from the start of the test name, etc.
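
    For example, a hypothetical filter that combines these features to select only GROMACS checks at the 2-node scale could look like this:

    reframe --list --name '^GROMACS.*%scale=2_nodes'\n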

    Use --list or --dry-run to check the impact of using the --name option.

    "},{"location":"test-suite/usage/#filter-system-partition","title":"Filtering by system (partition)","text":"

    By default, ReFrame will generate checks for each system partition that is listed in your configuration file.

    To let ReFrame only generate checks for a particular system or system partition, you can use the --system option.

    For example:

    • To let ReFrame only generate checks for the system named example, use:
      reframe --system example ...\n
    • To let ReFrame only generate checks for the gpu partition of the system named example, use:
      reframe --system example:gpu ...\n

    Use --dry-run to check the impact of using the --system option.

    "},{"location":"test-suite/usage/#filter-tag","title":"Filtering by tags","text":"

    To filter tests using one or more tags, you can use the --tag option.

    Using --list-tags you can get a list of known tags.

    To check the impact of this on generated checks by ReFrame, use --list or --dry-run.

    "},{"location":"test-suite/usage/#ci-tag","title":"CI tag","text":"

    For each software that is included in the EESSI test suite, a small test is tagged with CI to indicate it can be used in a Continuous Integration (CI) environment.

    Hence, you can use this tag to let ReFrame only generate checks for small test cases:

    reframe --tag CI\n

    For example:

    $ reframe --name GROMACS --tag CI\n...\n
    "},{"location":"test-suite/usage/#scale-tags","title":"scale tags","text":"

    The EESSI test suite defines a set of custom tags that control the scale of checks, which specify how many cores/GPUs/nodes should be used for running a check. The number of cores and GPUs serves as an upper limit; the actual count depends on the specific configuration of cores, GPUs, and sockets within the node, as well as the specific test being carried out.

    The available scale tags are:

    • 1_core: using 1 CPU core and 1 GPU
    • 2_cores: using 2 CPU cores and 1 GPU
    • 4_cores: using 4 CPU cores and 1 GPU
    • 1_cpn_2_nodes: using 1 CPU core per node, 1 GPU per node, and 2 nodes
    • 1_cpn_4_nodes: using 1 CPU core per node, 1 GPU per node, and 4 nodes
    • 1_8_node: using 1/8th of a node (12.5% of available cores/GPUs, 1 at minimum)
    • 1_4_node: using a quarter of a node (25% of available cores/GPUs, 1 at minimum)
    • 1_2_node: using half of a node (50% of available cores/GPUs, 1 at minimum)
    • 1_node: using a full node (all available cores/GPUs)
    • 2_nodes: using 2 full nodes
    • 4_nodes: using 4 full nodes
    • 8_nodes: using 8 full nodes
    • 16_nodes: using 16 full nodes
    "},{"location":"test-suite/usage/#using-multiple-tags","title":"Using multiple tags","text":"

    To filter tests using multiple tags, you can:

    • use | as separator to indicate that one of the specified tags must match (logical OR, for example --tag='1_core|2_cores');
    • use the --tag option multiple times to indicate that all specified tags must match (logical AND, for example --tag CI --tag 1_core);
    "},{"location":"test-suite/usage/#example-commands","title":"Example commands","text":"

    Running all GROMACS tests on 4 cores on the cpu partition

    reframe --run --system example:cpu --name GROMACS --tag 4_cores --performance-report\n

    List all checks for TensorFlow 2.11 using a single node

    reframe --list --name %module_name=TensorFlow/2.11 --tag 1_node\n

    Dry run of TensorFlow CI checks on a quarter (1/4) of a node (on all system partitions)

    reframe --dry-run --name 'TensorFlow.*CUDA' --tag 1_4_node --tag CI\n
    "},{"location":"test-suite/usage/#overriding-test-parameters-advanced","title":"Overriding test parameters (advanced)","text":"

    You can override test parameters using the --setvar option (or -S).

    This can be done either globally (for all tests), or only for specific tests (which is recommended when using --setvar).

    For example, to run all GROMACS checks with a specific GROMACS module, you can use:

    reframe --setvar GROMACS_EESSI.modules=GROMACS/2023.1-foss-2022a ...\n

    Warning

    We do not recommend using --setvar, since it is quite easy to make unintended changes to test parameters this way that can result in broken checks.

    You should try filtering tests using the --name or --tag options instead.

    "},{"location":"using_eessi/basic_commands/","title":"Basic commands","text":""},{"location":"using_eessi/basic_commands/#basic-commands-to-access-software-provided-via-eessi","title":"Basic commands to access software provided via EESSI","text":"

    EESSI provides software through environment module files and Lmod.

    To see which modules (and extensions) are available, run:

    module avail\n

    Below is a short excerpt of the output produced by module avail, showing 10 modules only.

       PyYAML/5.3-GCCcore-9.3.0\n   Qt5/5.14.1-GCCcore-9.3.0\n   Qt5/5.15.2-GCCcore-10.3.0                               (D)\n   QuantumESPRESSO/6.6-foss-2020a\n   R-bundle-Bioconductor/3.11-foss-2020a-R-4.0.0\n   R/4.0.0-foss-2020a\n   R/4.1.0-foss-2021a                                      (D)\n   re2c/1.3-GCCcore-9.3.0\n   re2c/2.1.1-GCCcore-10.3.0                               (D)\n   RStudio-Server/1.3.1093-foss-2020a-Java-11-R-4.0.0\n

    Load modules with module load package/version, e.g., module load R/4.1.0-foss-2021a, and try out the software. See below for a short example session.

    [EESSI 2023.06] $ module load R/4.1.0-foss-2021a\n[EESSI 2021.06] $ which R\n/cvmfs/software.eessi.io/versions/2021.12/software/linux/x86_64/intel/skylake_avx512/software/R/4.1.0-foss-2021a/bin/R\n[EESSI 2023.06] $ R --version\nR version 4.1.0 (2021-05-18) -- \"Camp Pontanezen\"\nCopyright (C) 2021 The R Foundation for Statistical Computing\nPlatform: x86_64-pc-linux-gnu (64-bit)\n\nR is free software and comes with ABSOLUTELY NO WARRANTY.\nYou are welcome to redistribute it under the terms of the\nGNU General Public License versions 2 or 3.\nFor more information about these matters see\nhttps://www.gnu.org/licenses/.\n
    "},{"location":"using_eessi/building_on_eessi/","title":"Building software on top of EESSI","text":""},{"location":"using_eessi/building_on_eessi/#building-software-on-top-of-eessi-with-easybuild","title":"Building software on top of EESSI with EasyBuild","text":"

    Building on top of EESSI with EasyBuild is relatively straightforward. One crucial feature is that EasyBuild supports building against operating system libraries that are not in a standard prefix (such as /usr/lib). This is required when building against EESSI, since all of the software in EESSI is built against the compatibility layer.

    "},{"location":"using_eessi/building_on_eessi/#starting-the-eessi-software-environment","title":"Starting the EESSI software environment","text":"

    Start your environment as described here.

    "},{"location":"using_eessi/building_on_eessi/#configure-easybuild","title":"Configure EasyBuild","text":"

    To configure EasyBuild, first, check out the EESSI software-layer repository. We advise you to check out the branch corresponding to the version of EESSI you would like to use.

    If you are unsure which version you are using, you can run

    echo ${EESSI_VERSION}\n
    to check it.

    To build on top of e.g. version 2023.06 of the EESSI software stack, we check it out, and go into that directory:

    git clone https://github.com/EESSI/software-layer/ --branch 2023.06\ncd software-layer\n
    Then, you have to pick a working directory (that you have write access to) where EasyBuild can do the build, and an install directory (with sufficient storage space), where EasyBuild can install it. In this example, we create a temporary directory in /tmp/ as our working directory, and use $HOME/.local/easybuild as our installpath:
    export WORKDIR=$(mktemp --directory --tmpdir=/tmp  -t eessi-build.XXXXXXXXXX)\nsource configure_easybuild\nexport EASYBUILD_INSTALLPATH=\"${HOME}/.local/easybuild\"\n
    Next, you load the EasyBuild module that you want to use, e.g.
    module load EasyBuild/4.8.2\n
    Finally, you can check the current configuration for EasyBuild using
    eb --show-config\n

    Note

    We use EasyBuild's default behaviour in optimizing for the host architecture. Since the EESSI initialization script also loads the EESSI stack that is optimized for your host architecture, this matches nicely. However, if you work on a cluster with heterogeneous node types, you have to realize you can only use these builds on the same architecture as the one where you built them. You can use different EASYBUILD_INSTALLPATHs if you want to build for different host architectures. For example, when you are on a system that has a mix of AMD zen3 and AMD zen4 nodes, you might want to use EASYBUILD_INSTALLPATH=$HOME/.local/easybuild/zen3 when building on a zen3 node, and EASYBUILD_INSTALLPATH=$HOME/.local/easybuild/zen4 when building on a zen4 node. Then, in the step below, instead of the module use command listed there, you can use module use $HOME/.local/easybuild/zen3/modules/all when you want to run on a zen3 node and module use $HOME/.local/easybuild/zen4/modules/all when you want to run on a zen4 node.
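
    A minimal sketch of such an architecture-specific setup is shown below; it assumes that the EESSI initialization script has set $EESSI_SOFTWARE_SUBDIR to something like x86_64/amd/zen3 (this variable is an assumption here; you can also simply hard-code zen3 or zen4 as in the note above):

    # derive an architecture-specific install path (assumes $EESSI_SOFTWARE_SUBDIR is set by the EESSI init script)\nexport EASYBUILD_INSTALLPATH=\"${HOME}/.local/easybuild/$(basename ${EESSI_SOFTWARE_SUBDIR})\"\n# later, on a node of the same architecture, make the corresponding modules available:\nmodule use ${EASYBUILD_INSTALLPATH}/modules/all\n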

    "},{"location":"using_eessi/building_on_eessi/#building","title":"Building","text":"

    Now, you are ready to build. For example, at the time of writing, netCDF-4.9.0-gompi-2022a.eb was not in the EESSI environment yet, so you can build it yourself:

    eb netCDF-4.9.0-gompi-2022a.eb\n

    Note

    If this netCDF module is available by the time you are trying, you can force a local rebuild by adding the --rebuild argument in order to experiment with building locally, or pick a different EasyConfig to build.

    "},{"location":"using_eessi/building_on_eessi/#using-the-newly-built-module","title":"Using the newly built module","text":"

    First, you'll need to add the subdirectory of the EASYBUILD_INSTALLPATH that contains the modules to the MODULEPATH. You can do that using:

    module use ${EASYBUILD_INSTALLPATH}/modules/all\n

    You may want to do this as part of your .bashrc.

    Note

    Be careful adding to the MODULEPATH in your .bashrc if you are on a cluster with heterogeneous architectures. You don't want to accidentally pick up a module that was not compiled for the correct architecture.

    Since your module is built on top of the EESSI environment, that needs to be loaded first (as described here), if you haven't already done so.

    Finally, you should be able to load your newly built module:

    module load netCDF/4.9.0-gompi-2022a\n

    "},{"location":"using_eessi/building_on_eessi/#manually-building-software-op-top-of-eessi","title":"Manually building software op top of EESSI","text":"

    Building software on top of EESSI requires your linker to use the same system dependencies as the software in EESSI does. In other words: it requires you to link against libraries from the compatibility layer, instead of from your host OS.

    While we plan to support this in the future, manually building on top of EESSI is currently not supported in a trivial way.

    "},{"location":"using_eessi/eessi_demos/","title":"Running EESSI demos","text":"

    To really experience how using EESSI can significantly facilitate the work of researchers, we recommend running one or more of the EESSI demos.

    First, clone the eessi-demo Git repository, and move into the resulting directory:

    git clone https://github.com/EESSI/eessi-demo.git\ncd eessi-demo\n

    The contents of the directory should be something like this:

    $ ls -l\ntotal 48\ndrwxrwxr-x 2 example users  4096 May 15 13:26 Bioconductor\ndrwxrwxr-x 2 example users  4096 May 15 13:26 ESPResSo\ndrwxrwxr-x 2 example users  4096 May 15 13:26 GROMACS\n-rw-rw-r-- 1 example users 18092 Dec  5  2022 LICENSE\ndrwxrwxr-x 2 example users  4096 May 15 13:26 OpenFOAM\n-rw-rw-r-- 1 example users   543 May 15 13:26 README.md\ndrwxrwxr-x 3 example users  4096 May 15 13:26 scripts\ndrwxrwxr-x 2 example users  4096 May 15 13:26 TensorFlow\n

    The directories we care about are those that correspond to particular scientific software, like Bioconductor, GROMACS, OpenFOAM, TensorFlow, ...

    Each of these contains a run.sh script that can be used to start a small example run with that software. Every example takes a couple of minutes to run, even with limited resources only.

    "},{"location":"using_eessi/eessi_demos/#example-running-tensorflow","title":"Example: running TensorFlow","text":"

    Let's try running the TensorFlow example.

    First, we need to make sure that our environment is set up to use EESSI:

    source /cvmfs/software.eessi.io/versions/2023.06/init/bash\n

    Change to the TensorFlow subdirectory of the eessi-demo Git repository, and execute the run.sh script:

    [EESSI 2023.06] $ cd TensorFlow\n[EESSI 2023.06] $ ./run.sh\n

    Shortly after starting the script you should see output as shown below, which indicates that TensorFlow has started running:

    Epoch 1/5\n   1875/1875 [==============================] - 3s 1ms/step - loss: 0.2983 - accuracy: 0.9140\nEpoch 2/5\n   1875/1875 [==============================] - 3s 1ms/step - loss: 0.1444 - accuracy: 0.9563\nEpoch 3/5\n   1875/1875 [==============================] - 3s 1ms/step - loss: 0.1078 - accuracy: 0.9670\nEpoch 4/5\n   1875/1875 [==============================] - 3s 1ms/step - loss: 0.0890 - accuracy: 0.9717\nEpoch 5/5\n   1875/1875 [==============================] - 3s 1ms/step - loss: 0.0732 - accuracy: 0.9772\n313/313 - 0s - loss: 0.0679 - accuracy: 0.9790 - 391ms/epoch - 1ms/step\n\nreal   1m24.645s\nuser   0m16.467s\nsys    0m0.910s\n
    "},{"location":"using_eessi/setting_up_environment/","title":"Setting up your environment","text":"

    To set up the EESSI environment, simply run the command:

    source /cvmfs/software.eessi.io/versions/2023.06/init/bash\n

    This may take a while, as data is downloaded from a Stratum 1 server, which is part of the CernVM-FS infrastructure used to distribute files. You should see output similar to the following:

    Found EESSI repo @ /cvmfs/software.eessi.io/versions/2023.06!\narchdetect says x86_64/amd/zen2\nUsing x86_64/amd/zen2 as software subdirectory.\nUsing /cvmfs/software.eessi.io/versions/2023.06/software/linux/x86_64/amd/zen2/modules/all as the directory to be added to MODULEPATH.\nFound Lmod configuration file at /cvmfs/software.eessi.io/versions/2023.06/software/linux/x86_64/amd/zen2/.lmod/lmodrc.lua\nInitializing Lmod...\nPrepending /cvmfs/software.eessi.io/versions/2023.06/software/linux/x86_64/amd/zen2/modules/all to $MODULEPATH...\nEnvironment set up to use EESSI (2023.06), have fun!\n{EESSI 2023.06} [user@system ~]$ # (2)!\n
    1. What is reported here depends on the CPU architecture of the machine on which you are running the source command.
    2. This is the prompt indicating that you have access to the EESSI software stack.

    The last line is the shell prompt.

    Your environment is now set up; you are ready to start running software provided by EESSI!
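
    For example, you could now list the modules that EESSI provides for your CPU architecture, or load one of them (available modules and versions depend on the EESSI version and your architecture):

    module avail\nmodule load GROMACS\n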

    "},{"location":"blog/archive/2024/","title":"2024","text":""}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml index d975e7034..7d4563908 100644 --- a/sitemap.xml +++ b/sitemap.xml @@ -125,6 +125,11 @@ 2024-06-04 daily + + https://eessi.github.io/docs/known_issues/eessi-2023.06/ + 2024-06-04 + daily + https://eessi.github.io/docs/meetings/2022-09-amsterdam/ 2024-06-04 diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 45637b36f..2998c0d07 100644 Binary files a/sitemap.xml.gz and b/sitemap.xml.gz differ diff --git a/software_layer/build_nodes/index.html b/software_layer/build_nodes/index.html index 1300b5cb2..feae2cb5f 100644 --- a/software_layer/build_nodes/index.html +++ b/software_layer/build_nodes/index.html @@ -14,7 +14,7 @@ - + @@ -1435,7 +1435,7 @@
  • - + diff --git a/software_layer/cpu_targets/index.html b/software_layer/cpu_targets/index.html index b202a19eb..8c399fc80 100644 --- a/software_layer/cpu_targets/index.html +++ b/software_layer/cpu_targets/index.html @@ -1358,7 +1358,7 @@
  • - + diff --git a/software_layer/index.html b/software_layer/index.html index afbb02ae6..34aa837d7 100644 --- a/software_layer/index.html +++ b/software_layer/index.html @@ -1358,7 +1358,7 @@
  • - + diff --git a/software_testing/index.html b/software_testing/index.html index e0f257c6b..b308fe8c6 100644 --- a/software_testing/index.html +++ b/software_testing/index.html @@ -1342,7 +1342,7 @@
  • - + diff --git a/support/index.html b/support/index.html index 561e055f7..5938b0cb1 100644 --- a/support/index.html +++ b/support/index.html @@ -11,7 +11,7 @@ - + @@ -1346,7 +1346,7 @@
  • - + diff --git a/talks/2023/20230615_aws_tech_short/index.html b/talks/2023/20230615_aws_tech_short/index.html index 4def2d588..a7e1be67e 100644 --- a/talks/2023/20230615_aws_tech_short/index.html +++ b/talks/2023/20230615_aws_tech_short/index.html @@ -1342,7 +1342,7 @@
  • - + diff --git a/talks/2023/20231027_packagingcon23_eessi/index.html b/talks/2023/20231027_packagingcon23_eessi/index.html index 78a5248d9..f4bb9f7a2 100644 --- a/talks/2023/20231027_packagingcon23_eessi/index.html +++ b/talks/2023/20231027_packagingcon23_eessi/index.html @@ -1342,7 +1342,7 @@
  • - + diff --git a/talks/2023/20231204_cvmfs_hpc/index.html b/talks/2023/20231204_cvmfs_hpc/index.html index 26729458f..fbda611b5 100644 --- a/talks/2023/20231204_cvmfs_hpc/index.html +++ b/talks/2023/20231204_cvmfs_hpc/index.html @@ -1342,7 +1342,7 @@
  • - + diff --git a/talks/2023/20231205_castiel2_eessi_intro/index.html b/talks/2023/20231205_castiel2_eessi_intro/index.html index fe2325fe5..5bf43514d 100644 --- a/talks/2023/20231205_castiel2_eessi_intro/index.html +++ b/talks/2023/20231205_castiel2_eessi_intro/index.html @@ -1342,7 +1342,7 @@
  • - + diff --git a/talks/20230615_aws_tech_short/index.html b/talks/20230615_aws_tech_short/index.html index 531658659..927255912 100644 --- a/talks/20230615_aws_tech_short/index.html +++ b/talks/20230615_aws_tech_short/index.html @@ -1342,7 +1342,7 @@
  • - + diff --git a/talks/index.html b/talks/index.html index 5ef4c24d6..31577ce4e 100644 --- a/talks/index.html +++ b/talks/index.html @@ -1346,7 +1346,7 @@
  • - + diff --git a/test-suite/ReFrame-configuration-file/index.html b/test-suite/ReFrame-configuration-file/index.html index 2df3be24b..f9a4fc46d 100644 --- a/test-suite/ReFrame-configuration-file/index.html +++ b/test-suite/ReFrame-configuration-file/index.html @@ -1477,7 +1477,7 @@
  • - + diff --git a/test-suite/available-tests/index.html b/test-suite/available-tests/index.html index 4854e2153..887922633 100644 --- a/test-suite/available-tests/index.html +++ b/test-suite/available-tests/index.html @@ -1417,7 +1417,7 @@
  • - + diff --git a/test-suite/index.html b/test-suite/index.html index ab0e724ec..745036564 100644 --- a/test-suite/index.html +++ b/test-suite/index.html @@ -1360,7 +1360,7 @@
  • - + diff --git a/test-suite/installation-configuration/index.html b/test-suite/installation-configuration/index.html index 148996795..1034e00dd 100644 --- a/test-suite/installation-configuration/index.html +++ b/test-suite/installation-configuration/index.html @@ -1522,7 +1522,7 @@
  • - + diff --git a/test-suite/release-notes/index.html b/test-suite/release-notes/index.html index 84cdb50ef..9d9a0577f 100644 --- a/test-suite/release-notes/index.html +++ b/test-suite/release-notes/index.html @@ -1408,7 +1408,7 @@
  • - + diff --git a/test-suite/usage/index.html b/test-suite/usage/index.html index 4b5654422..1c1408b89 100644 --- a/test-suite/usage/index.html +++ b/test-suite/usage/index.html @@ -1519,7 +1519,7 @@
  • - + diff --git a/using_eessi/basic_commands/index.html b/using_eessi/basic_commands/index.html index 036d87845..e4b435316 100644 --- a/using_eessi/basic_commands/index.html +++ b/using_eessi/basic_commands/index.html @@ -1393,7 +1393,7 @@
  • - + diff --git a/using_eessi/building_on_eessi/index.html b/using_eessi/building_on_eessi/index.html index 3f5e98003..6bb16f0d8 100644 --- a/using_eessi/building_on_eessi/index.html +++ b/using_eessi/building_on_eessi/index.html @@ -1448,7 +1448,7 @@
  • - + diff --git a/using_eessi/eessi_demos/index.html b/using_eessi/eessi_demos/index.html index 958ccc108..9224aa994 100644 --- a/using_eessi/eessi_demos/index.html +++ b/using_eessi/eessi_demos/index.html @@ -1397,7 +1397,7 @@
  • - + diff --git a/using_eessi/setting_up_environment/index.html b/using_eessi/setting_up_environment/index.html index f78068976..97d3462ae 100644 --- a/using_eessi/setting_up_environment/index.html +++ b/using_eessi/setting_up_environment/index.html @@ -1358,7 +1358,7 @@
  • - +