diff --git a/.flake8 b/.flake8 index 7fcfdf8cd..24623e3f0 100644 --- a/.flake8 +++ b/.flake8 @@ -8,6 +8,10 @@ max-line-length = 88 select = C,E,F,W,B,B950 ignore = E501,W503,E203 + +per-file-ignores = + modifiers/*/modifier.py:F403,F405 + builtins = IPython exclude = .eggs, diff --git a/.github/workflows/bin/license b/.github/workflows/bin/license index 1dd37f974..7ec0e7355 100644 --- a/.github/workflows/bin/license +++ b/.github/workflows/bin/license @@ -24,7 +24,7 @@ licensed_files = [ r"^configs\/[^\/]*\/[^\/]*", r"^docs\/[^\/]*\.rst$", r"^experiments\/[^\/]*\/[^\/]*", - r"^repo\/[^\/]*\/[^\/]*", + r"^repo\/[^\/]*\/[^\/]*(? + top500-system-instances: + - Frontier (ORNL) + - LUMI (CSC) + - Tioga (LLNL) diff --git a/configs/CSC-LUMI-HPECray-zen3-MI250X-Slingshot/variables.yaml b/configs/CSC-LUMI-HPECray-zen3-MI250X-Slingshot/variables.yaml new file mode 100644 index 000000000..6c86bdd0e --- /dev/null +++ b/configs/CSC-LUMI-HPECray-zen3-MI250X-Slingshot/variables.yaml @@ -0,0 +1,17 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +variables: + gtl_flag: '' # to be overwritten by tests that need GTL + rocm_arch: 'gfx90a' + batch_time: '02:00' + mpi_command: 'srun -N {n_nodes} -n {n_ranks}' + batch_submit: 'sbatch {execute_experiment}' + batch_nodes: '#SBATCH -N {n_nodes}' + batch_ranks: '#SBATCH -n {n_ranks}' + batch_timeout: '#SBATCH -t {batch_time}:00' + cpu_partition: '#SBATCH -p small' + gpu_partition: '#SBATCH -p small-g' + diff --git a/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/auxiliary_software_files/compilers.yaml b/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/auxiliary_software_files/compilers.yaml new file mode 100644 index 000000000..3b26506a0 --- /dev/null +++ b/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/auxiliary_software_files/compilers.yaml @@ -0,0 +1,111 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +compilers: +- compiler: + spec: cce@12.0.3 + paths: + cc: cc + cxx: CC + f77: ftn + fc: ftn + flags: {} + operating_system: cnl7 + target: any + modules: + - PrgEnv-cray + - cce/12.0.3 + environment: {} + extra_rpaths: [] +- compiler: + spec: gcc@9.3.0 + paths: + cc: cc + cxx: CC + f77: ftn + fc: ftn + flags: {} + operating_system: cnl7 + target: any + modules: + - PrgEnv-gnu + - gcc/9.3.0 + environment: {} + extra_rpaths: [] +- compiler: + spec: gcc@10.3.0 + paths: + cc: cc + cxx: CC + f77: ftn + fc: ftn + flags: {} + operating_system: cnl7 + target: any + modules: + - PrgEnv-gnu + - gcc/10.3.0 + environment: {} + extra_rpaths: [] +- compiler: + spec: gcc@11.2.0 + paths: + cc: cc + cxx: CC + f77: ftn + fc: ftn + flags: {} + operating_system: cnl7 + target: any + modules: + - PrgEnv-gnu + - gcc/11.2.0 + environment: {} + extra_rpaths: [] +- compiler: + spec: intel@2021.3.0 + paths: + cc: cc + cxx: CC + f77: ftn + fc: ftn + flags: {} + operating_system: cnl7 + target: 
any + modules: + - PrgEnv-intel + - intel/2021.3.0 + environment: {} + extra_rpaths: [] +- compiler: + spec: nvhpc@21.3 + paths: + cc: cc + cxx: CC + f77: ftn + fc: ftn + flags: {} + operating_system: cnl7 + target: any + modules: + - PrgEnv-nvidia + - nvidia/21.3 + environment: {} + extra_rpaths: [] +- compiler: + spec: pgi@20.1.1 + paths: + cc: cc + cxx: CC + f77: ftn + fc: ftn + flags: {} + operating_system: cnl7 + target: any + modules: + - PrgEnv-pgi + - pgi/20.1.1 + environment: {} + extra_rpaths: [] diff --git a/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/auxiliary_software_files/packages.yaml b/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/auxiliary_software_files/packages.yaml new file mode 100644 index 000000000..9d85d1a3d --- /dev/null +++ b/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/auxiliary_software_files/packages.yaml @@ -0,0 +1,71 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +packages: + all: + providers: + mpi: + - cray-mpich + pkgconfig: + - pkg-config + - pkgconf + cuda: + externals: + - spec: cuda@11.0.207 + prefix: /usr/local/cuda-11.0 + - spec: cuda@11.1.0 + prefix: /usr/local/cuda-11.1 + - spec: cuda@11.2.0 + prefix: /usr/local/cuda-11.2 + - spec: cuda@10.2.89 + prefix: /opt/nvidia/cudatoolkit10.2/10.2.89_3.28-2.1__g52c0314 + pkg-config: + externals: + - spec: pkg-config@0.29.2 + prefix: /usr + r: + externals: + - spec: r@4.1.1.0 + modules: + - cray-R/4.1.1.0 + jemalloc: + externals: + - spec: jemalloc@5.1.0.3 + modules: + - cray-jemalloc/5.1.0.3 + cray-libsci: + externals: + - spec: cray-libsci@20.09.1 + modules: + - cray-libsci/20.09.1 + cray-mpich: + externals: + - spec: cray-mpich@7.7.18 + modules: + - cray-mpich/7.7.18 + netcdf-c: + externals: + - spec: netcdf-c@4.7.4.4+mpi+parallel-netcdf + modules: + - cray-netcdf-hdf5parallel/4.7.4.4 + petsc: + externals: + - spec: petsc@3.14.5.0~complex~cuda~int64 + modules: + - cray-petsc/3.14.5.0 + - spec: petsc@3.14.5.0~complex~cuda+int64 + modules: + - cray-petsc-64/3.14.5.0 + - spec: petsc@3.14.5.0+complex~cuda~int64 + modules: + - cray-petsc-complex/3.14.5.0 + - spec: petsc@3.14.5.0+complex~cuda+int64 + modules: + - cray-petsc-complex-64/3.14.5.0 + papi: + externals: + - spec: papi@6.0.0.9 + modules: + - papi/6.0.0.9 diff --git a/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/spack.yaml b/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/spack.yaml new file mode 100644 index 000000000..17e5c1e3b --- /dev/null +++ b/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/spack.yaml @@ -0,0 +1,13 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +spack: + packages: + default-compiler: + spack_spec: gcc@9.3.0 # cce@12.0.3 + default-mpi: + spack_spec: cray-mpich@7.7.18 + compiler-gcc: + spack_spec: gcc@9.3.0 diff --git a/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/system_definition.yaml b/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/system_definition.yaml new file mode 100644 index 000000000..1535acc74 --- /dev/null +++ b/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/system_definition.yaml @@ -0,0 +1,32 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +system_definition: + name: Piz Daint + site: CSCS + system: HPECray-haswell-P100-Infiniband + integrator: + vendor: HPECray + name: + processor: + vendor: Intel + name: Xeon-E5-2650v3 + ISA: x86_64 + uArch: haswell + accelerator: + vendor: NVIDIA + name: P100 + ISA: PTX + uArch: sm_60 + interconnect: + vendor: HPECray + name: Aries + system-tested: + site: CSCS + name: daint + installation-year: 2017 + description: top500 + top500-system-instances: + - Piz Daint (CSCS) diff --git a/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/variables.yaml b/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/variables.yaml new file mode 100644 index 000000000..682ab5315 --- /dev/null +++ b/configs/CSCS-Daint-HPECray-haswell-P100-Infiniband/variables.yaml @@ -0,0 +1,15 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +variables: + batch_time: '02:00' + mpi_command: 'srun -N {n_nodes} -n {n_ranks}' + batch_submit: 'sbatch {execute_experiment}' + batch_nodes: '#SBATCH -N {n_nodes}' + batch_ranks: '#SBATCH -n {n_ranks}' + batch_timeout: '#SBATCH -t {batch_time}:00' + default_cuda_version: '11.2.0' + cuda_arch: '60' + enable_mps: '/usr/tcetmp/bin/enable_mps' diff --git a/configs/CSCS-Eiger-HPECray-zen2-Slingshot/auxiliary_software_files/packages.yaml b/configs/CSCS-Eiger-HPECray-zen2-Slingshot/auxiliary_software_files/packages.yaml new file mode 100644 index 000000000..dfcbd8e48 --- /dev/null +++ b/configs/CSCS-Eiger-HPECray-zen2-Slingshot/auxiliary_software_files/packages.yaml @@ -0,0 +1,14 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +packages: + all: + providers: + mpi: + - cray-mpich + cray-mpich: + externals: + - spec: cray-mpich@8.1.28 + prefix: /user-environment/env/default diff --git a/configs/CSCS-Eiger-HPECray-zen2-Slingshot/spack.yaml b/configs/CSCS-Eiger-HPECray-zen2-Slingshot/spack.yaml new file mode 100644 index 000000000..1c055fe54 --- /dev/null +++ b/configs/CSCS-Eiger-HPECray-zen2-Slingshot/spack.yaml @@ -0,0 +1,13 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +spack: + packages: + default-compiler: + spack_spec: gcc@12.3.0 + default-mpi: + spack_spec: cray-mpich@8.1.28 + compiler-gcc: + spack_spec: gcc@12.3.0 diff --git a/configs/CSCS-Eiger-HPECray-zen2-Slingshot/system_definition.yaml b/configs/CSCS-Eiger-HPECray-zen2-Slingshot/system_definition.yaml new file mode 100644 index 000000000..68397e951 --- /dev/null +++ b/configs/CSCS-Eiger-HPECray-zen2-Slingshot/system_definition.yaml @@ -0,0 +1,32 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +system_definition: + name: Eiger + site: CSCS + system: HPECray-zen2-Slingshot + integrator: + vendor: HPECray + name: + processor: + vendor: AMD + name: EPYC-7742 + ISA: x86_64 + uArch: zen2 + accelerator: + vendor: + name: + ISA: + uArch: + interconnect: + vendor: HPECray + name: Slingshot + system-tested: + site: CSCS + name: eiger + installation-year: 2017 + description: top500 + top500-system-instances: + - diff --git a/configs/CSCS-Eiger-HPECray-zen2-Slingshot/variables.yaml b/configs/CSCS-Eiger-HPECray-zen2-Slingshot/variables.yaml new file mode 100644 index 000000000..4e258c3bb --- /dev/null +++ b/configs/CSCS-Eiger-HPECray-zen2-Slingshot/variables.yaml @@ -0,0 +1,12 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +variables: + batch_time: '00:30' + mpi_command: 'srun -N {n_nodes} -n {n_ranks}' + batch_submit: 'sbatch {execute_experiment}' + batch_nodes: '#SBATCH -N {n_nodes}' + batch_ranks: '#SBATCH -n {n_ranks}' + batch_timeout: '#SBATCH -t {batch_time}:00' diff --git a/configs/LLNL-Magma-Penguin-icelake-OmniPath/auxiliary_software_files/packages.yaml b/configs/LLNL-Magma-Penguin-icelake-OmniPath/auxiliary_software_files/packages.yaml index 941b8a7a6..dc0af79db 100644 --- a/configs/LLNL-Magma-Penguin-icelake-OmniPath/auxiliary_software_files/packages.yaml +++ b/configs/LLNL-Magma-Penguin-icelake-OmniPath/auxiliary_software_files/packages.yaml @@ -28,6 +28,21 @@ packages: - spec: intel-oneapi-mkl@2022.1.0 prefix: /usr/tce/backend/installations/linux-rhel8-x86_64/intel-19.0.4/intel-oneapi-mkl-2022.1.0-sksz67twjxftvwchnagedk36gf7plkrp buildable: false + python: + externals: + - spec: python@3.9.12 + prefix: /usr/tce/packages/python/python-3.9.12/ + buildable: false + hwloc: + externals: + - spec: hwloc@2.9.1 + prefix: /usr + buildable: false + fftw: + externals: + - spec: fftw@3.3.10 + prefix: /usr/tce/packages/fftw/fftw-3.3.10 + buildable: false mpi: externals: - spec: mvapich2@2.3.7-gcc1211 diff --git a/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/auxiliary_software_files/compilers.yaml b/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/auxiliary_software_files/compilers.yaml new file mode 100644 index 000000000..05a2af515 --- /dev/null +++ b/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/auxiliary_software_files/compilers.yaml @@ -0,0 +1,32 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +compilers: +- compiler: + spec: gcc@=11.2.1 + paths: + cc: /usr/tce/packages/gcc/gcc-11.2.1/bin/gcc + cxx: /usr/tce/packages/gcc/gcc-11.2.1/bin/g++ + f77: /usr/tce/packages/gcc/gcc-11.2.1/bin/gfortran + fc: /usr/tce/packages/gcc/gcc-11.2.1/bin/gfortran + flags: {} + operating_system: rhel8 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] +- compiler: + spec: clang@=14.0.6 + paths: + cc: /usr/tce/packages/clang/clang-14.0.6/bin/clang + cxx: /usr/tce/packages/clang/clang-14.0.6/bin/clang++ + f77: /usr/tce/packages/gcc/gcc-11.2.1/bin/gfortran + fc: /usr/tce/packages/gcc/gcc-11.2.1/bin/gfortran + flags: {} + operating_system: rhel8 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] diff --git a/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/auxiliary_software_files/packages.yaml b/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/auxiliary_software_files/packages.yaml new file mode 100644 index 000000000..1daf4b719 --- /dev/null +++ b/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/auxiliary_software_files/packages.yaml @@ -0,0 +1,80 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +packages: + tar: + externals: + - spec: tar@1.30 + prefix: /usr + buildable: false + cmake: + externals: + - spec: cmake@3.23.1 + prefix: /usr/tce/packages/cmake/cmake-3.23.1 + buildable: false + gmake: + externals: + - spec: gmake@4.2.1 + prefix: /usr/tcetmp/packages/gmake/gmake-4.2.1 + buildable: false + automake: + externals: + - spec: automake@1.16.1 + prefix: /usr + autoconf: + externals: + - spec: autoconf@2.69 + prefix: /usr + mpi: + buildable: false + mvapich2: + buildable: false + externals: + - spec: mvapich2@2.3.7-gcc-11.2.1 + prefix: /usr/tce/packages/mvapich2/mvapich2-2.3.7-gcc-11.2.1 + - spec: mvapich2@2.3.7-intel-2021.6.0 + prefix: /usr/tce/packages/mvapich2/mvapich2-2.3.7-intel-classic-2021.6.0/ + - spec: mvapich2@2.3.7-clang-14.0.6 + prefix: /usr/tce/packages/mvapich2/mvapich2-2.3.7-clang-14.0.6 + cuda: + buildable: false + externals: + - spec: cuda@11.8.0 + prefix: /usr/tce/packages/cuda/cuda-11.8.0/ + curand: + externals: + - spec: curand@11.8.0 + prefix: /usr/tce/packages/cuda/cuda-11.8.0 + buildable: false + cusparse: + externals: + - spec: cusparse@11.8.0 + prefix: /usr/tce/packages/cuda/cuda-11.8.0 + buildable: false + cublas: + externals: + - spec: cublas@11.8.0 + prefix: /usr/tce/packages/cuda/cuda-11.8.0 + buildable: false + blas: + externals: + - spec: intel-oneapi-mkl@2022.1.0 + prefix: /usr/tce/packages/mkl/mkl-2022.1.0 + buildable: false + fftw: + externals: + - spec: intel-oneapi-mkl@2022.1.0 + prefix: /usr/tce/packages/mkl/mkl-2022.1.0 + buildable: false + lapack: + externals: + - spec: intel-oneapi-mkl@2022.1.0 + prefix: /usr/tce/packages/mkl/mkl-2022.1.0 + buildable: false + intel-oneapi-mkl: + externals: + - spec: intel-oneapi-mkl@2022.1.0 + prefix: /usr/tce/packages/mkl/mkl-2022.1.0/ + buildable: false diff --git a/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/spack.yaml b/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/spack.yaml new file mode 100644 index 
000000000..1f15be61d --- /dev/null +++ b/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/spack.yaml @@ -0,0 +1,25 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +spack: + packages: + default-compiler: + spack_spec: gcc@=11.2.1 + default-mpi: + spack_spec: mvapich2@2.3.7-gcc-11.2.1 + compiler-gcc: + spack_spec: gcc@=11.2.1 + compiler-clang: + spack_spec: clang@=14.0.6 + mpi-clang: + spack_spec: mvapich2@2.3.7-clang-14.0.6 + blas: + spack_spec: intel-oneapi-mkl@2022.1.0 + cublas-cuda: + spack_spec: cublas@{default_cuda_version} + lapack: + spack_spec: intel-oneapi-mkl@2022.1.0 + fftw: + spack_spec: intel-oneapi-mkl@2022.1.0 diff --git a/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/system_definition.yaml b/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/system_definition.yaml new file mode 100644 index 000000000..01d1688fe --- /dev/null +++ b/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/system_definition.yaml @@ -0,0 +1,32 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +system_definition: + name: Pascal + site: LLNL + system: Penguin-broadwell-P100-OmniPath + integrator: + vendor: Penguin + name: + processor: + vendor: Intel + name: Xeon_E5-2695_v4 + ISA: x86_64 + uArch: broadwell + accelerator: + vendor: NVIDIA + name: P100 + ISA: PTX + uArch: sm_60 + interconnect: + vendor: Cornelis + name: OmniPath + system-tested: + site: LLNL + name: pascal + installation-year: 2018 + description: + top500-system-instances: + - diff --git a/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/variables.yaml b/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/variables.yaml new file mode 100644 index 000000000..21097fef2 --- /dev/null +++ b/configs/LLNL-Pascal-Penguin-broadwell-P100-OmniPath/variables.yaml @@ -0,0 +1,15 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +variables: + gtl_flag: '' # to be overwritten by tests that need GTL + cuda_arch: '60' + default_cuda_version: '11.8.0' + batch_time: '02:00' + mpi_command: 'srun -N {n_nodes} -n {n_ranks}' + batch_submit: 'sbatch {execute_experiment}' + batch_nodes: '#SBATCH -N {n_nodes}' + batch_ranks: '#SBATCH -n {n_ranks} -G {n_ranks}' + batch_timeout: '#SBATCH -t {batch_time}:00' diff --git a/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/auxiliary_software_files/compilers.yaml b/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/auxiliary_software_files/compilers.yaml index 501522dcf..d27a632c1 100644 --- a/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/auxiliary_software_files/compilers.yaml +++ b/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/auxiliary_software_files/compilers.yaml @@ -65,3 +65,19 @@ compilers: modules: [] environment: {} extra_rpaths: [] +- compiler: + spec: clang@16.0.6-cuda11.8.0 + paths: + cc: /usr/tce/packages/clang/clang-16.0.6-cuda-11.8.0-gcc-11.2.1/bin/clang + cxx: 
/usr/tce/packages/clang/clang-16.0.6-cuda-11.8.0-gcc-11.2.1/bin/clang++ + f77: /usr/tce/packages/gcc/gcc-11.2.1/bin/gfortran + fc: /usr/tce/packages/gcc/gcc-11.2.1/bin/gfortran + flags: + cflags: -g -O2 + cxxflags: -g -O2 -std=c++17 + fflags: '' + operating_system: rhel7 + target: ppc64le + modules: [] + environment: {} + extra_rpaths: [] diff --git a/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/auxiliary_software_files/packages.yaml b/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/auxiliary_software_files/packages.yaml index dd43a66ba..61c85f62d 100644 --- a/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/auxiliary_software_files/packages.yaml +++ b/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/auxiliary_software_files/packages.yaml @@ -55,11 +55,21 @@ packages: - spec: cublas@11.8.0 prefix: /usr/tce/packages/cuda/cuda-11.8.0 buildable: false + fftw: + externals: + - spec: fftw@3.3.10 + prefix: /usr/tcetmp/packages/fftw/fftw-3.3.10-xl-2023.06.28 + buildable: false lapack: externals: - spec: lapack-xl@3.9.0 prefix: /usr/tcetmp/packages/lapack/lapack-3.9.0-xl-2020.03.18 buildable: false + python: + externals: + - spec: python@3.8.2 + prefix: /usr/tce/packages/python/python-3.8.2 + buildable: false mpi: externals: - spec: spectrum-mpi@2022.08.19-cuda-11.8.0 @@ -70,4 +80,8 @@ packages: prefix: /usr/tce/packages/spectrum-mpi/spectrum-mpi-rolling-release-xl-2022.08.19-cuda-10.1.243 extra_attributes: ldflags: "-lmpiprofilesupport -lmpi_ibm_usempi -lmpi_ibm_mpifh -lmpi_ibm" + - spec: spectrum-mpi@2022.08.19-clang16.0.6-cuda-11.8.0 + prefix: /usr/tce/packages/spectrum-mpi/spectrum-mpi-rolling-release-clang-16.0.6-cuda-11.8.0-gcc-11.2.1 + extra_attributes: + ldflags: "-lmpiprofilesupport -lmpi_ibm_usempi -lmpi_ibm_mpifh -lmpi_ibm" buildable: false diff --git a/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/spack.yaml b/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/spack.yaml index bdebba667..ed1edcb60 100644 --- a/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/spack.yaml 
+++ b/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/spack.yaml @@ -11,11 +11,15 @@ spack: spack_spec: spectrum-mpi@2022.08.19 compiler-gcc: spack_spec: gcc@8.3.1 - compiler-cuda10: - spack_spec: xl@16.1.1-2022.08.19-cuda{cuda10_version} - cublas-cuda-10: - spack_spec: cublas@{cuda10_version} - cublas-cuda-default: - spack_spec: cublas@{default_cuda_version} + compiler-clang: + spack_spec: clang@16.0.6-cuda{default_cuda_version} + mpi-clang: + spack_spec: spectrum-mpi@2022.08.19-clang16.0.6-cuda-{default_cuda_version} + blas: + spack_spec: cublas@11.8.0 + cublas-cuda: + spack_spec: cublas@{default_cuda_version} lapack: spack_spec: lapack-xl@3.9.0 + fftw: + spack_spec: fftw@3.3.10 diff --git a/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/variables.yaml b/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/variables.yaml index 717ed9d76..b85115bbc 100644 --- a/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/variables.yaml +++ b/configs/LLNL-Sierra-IBM-power9-V100-Infiniband/variables.yaml @@ -9,9 +9,7 @@ variables: mpi_command: '/usr/tcetmp/bin/lrun -n {n_ranks} -T {processes_per_node} {gtl_flag}' batch_submit: 'bsub -q pdebug {execute_experiment}' batch_nodes: '#BSUB -nnodes {n_nodes}' - batch_ranks: '#BSUB -nnodes {n_nodes}' + batch_ranks: '' batch_timeout: '#BSUB -W {batch_time}' - cuda10_version: '10.1.243' default_cuda_version: '11.8.0' cuda_arch: '70' - enable_mps: '/usr/tcetmp/bin/enable_mps' diff --git a/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/auxiliary_software_files/compilers.yaml b/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/auxiliary_software_files/compilers.yaml index e3a97b0ac..d7af09cc1 100644 --- a/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/auxiliary_software_files/compilers.yaml +++ b/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/auxiliary_software_files/compilers.yaml @@ -13,13 +13,29 @@ compilers: fc: /opt/cray/pe/cce/16.0.0/bin/crayftn flags: cflags: -g -O2 - cxxflags: -g -O2 -std=c++14 + cxxflags: -g -O2 -std=c++17 fflags: -g 
-O2 -hnopattern operating_system: rhel8 target: x86_64 modules: [] environment: {} extra_rpaths: [/opt/cray/pe/gcc-libs/] +- compiler: + spec: clang@16.0.0-rocm5.5.1 + paths: + cc: /opt/rocm-5.5.1/bin/amdclang + cxx: /opt/rocm-5.5.1/bin/amdclang++ + f77: /opt/rocm-5.5.1/bin/amdflang + fc: /opt/rocm-5.5.1/bin/amdflang + flags: + cflags: -g -O2 --gcc-toolchain=/opt/rh/gcc-toolset-12/root/usr + cxxflags: -g -O2 -std=c++17 --gcc-toolchain=/opt/rh/gcc-toolset-12/root/usr + fflags: -g -O2 -hnopattern + operating_system: rhel8 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] - compiler: spec: gcc@12.2.0 paths: diff --git a/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/auxiliary_software_files/packages.yaml b/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/auxiliary_software_files/packages.yaml index 93d45ceea..af604fd29 100644 --- a/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/auxiliary_software_files/packages.yaml +++ b/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/auxiliary_software_files/packages.yaml @@ -115,6 +115,11 @@ packages: externals: - spec: bison@3.0.4 prefix: /usr + python: + externals: + - spec: python@3.9.12 + prefix: /usr/tce/packages/python/python-3.9.12 + buildable: false rocprim: externals: - spec: rocprim@5.4.3 @@ -193,6 +198,20 @@ packages: prefix: /opt/cray/pe/libsci/23.05.1.4/gnu/10.3/x86_64/ lapack: buildable: false + hypre: + variants: amdgpu_target=gfx90a + hwloc: + externals: + - spec: hwloc@2.9.1 + prefix: /usr + buildable: false + fftw: + buildable: false + intel-oneapi-mkl: + externals: + - spec: intel-oneapi-mkl@2023.2.0 + prefix: /opt/intel/oneapi + buildable: false mpi: buildable: false cray-mpich: diff --git a/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/spack.yaml b/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/spack.yaml index eeaa9e70b..0b96b4d54 100644 --- a/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/spack.yaml +++ b/configs/LLNL-Tioga-HPECray-zen3-MI250X-Slingshot/spack.yaml @@ -11,10 
+11,14 @@ spack: spack_spec: cray-mpich@8.1%cce ~gtl compiler-rocm: spack_spec: cce@16 + compiler-amdclang: + spack_spec: clang@16.0.0-rocm5.5.1 blas-rocm: spack_spec: rocblas@5.5 blas: spack_spec: rocblas@5.5 + lapack-rocm: + spack_spec: rocsolver@5.5 lapack: spack_spec: cray-libsci@23 mpi-rocm-gtl: @@ -23,3 +27,5 @@ spack: spack_spec: cray-mpich@8.1%cce ~gtl mpi-gcc: spack_spec: cray-mpich@8.1%gcc ~gtl + fftw: + spack_spec: intel-oneapi-mkl@2023.2.0 diff --git a/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/auxiliary_software_files/compilers.yaml b/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/auxiliary_software_files/compilers.yaml new file mode 100644 index 000000000..25129f761 --- /dev/null +++ b/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/auxiliary_software_files/compilers.yaml @@ -0,0 +1,136 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +compilers: +- compiler: + spec: fj@4.10.0 + modules: [] + paths: + cc: /opt/FJSVxtclanga/tcsds-1.2.38/bin/fcc + cxx: /opt/FJSVxtclanga/tcsds-1.2.38/bin/FCC + f77: /opt/FJSVxtclanga/tcsds-1.2.38/bin/frt + fc: /opt/FJSVxtclanga/tcsds-1.2.38/bin/frt + flags: {} + operating_system: rhel8 + target: aarch64 + environment: + set: + fcc_ENV: -Nclang + FCC_ENV: -Nclang + prepend_path: + PATH: /opt/FJSVxtclanga/tcsds-1.2.38/bin + LD_LIBRARY_PATH: /opt/FJSVxtclanga/tcsds-1.2.38/lib64 + extra_rpaths: [] +- compiler: + spec: fj@4.8.1 + modules: [] + paths: + cc: /opt/FJSVxtclanga/tcsds-1.2.36/bin/fcc + cxx: /opt/FJSVxtclanga/tcsds-1.2.36/bin/FCC + f77: /opt/FJSVxtclanga/tcsds-1.2.36/bin/frt + fc: /opt/FJSVxtclanga/tcsds-1.2.36/bin/frt + flags: {} + operating_system: rhel8 + target: aarch64 + environment: + set: + fcc_ENV: -Nclang + FCC_ENV: -Nclang + prepend_path: + PATH: /opt/FJSVxtclanga/tcsds-1.2.36/bin + LD_LIBRARY_PATH: /opt/FJSVxtclanga/tcsds-1.2.36/lib64 + extra_rpaths: [] +- compiler: + spec: 
fj@4.8.0 + modules: [] + paths: + cc: /opt/FJSVxtclanga/tcsds-1.2.35/bin/fcc + cxx: /opt/FJSVxtclanga/tcsds-1.2.35/bin/FCC + f77: /opt/FJSVxtclanga/tcsds-1.2.35/bin/frt + fc: /opt/FJSVxtclanga/tcsds-1.2.35/bin/frt + flags: {} + operating_system: rhel8 + target: aarch64 + environment: + set: + fcc_ENV: -Nclang + FCC_ENV: -Nclang + prepend_path: + PATH: /opt/FJSVxtclanga/tcsds-1.2.35/bin + LD_LIBRARY_PATH: /opt/FJSVxtclanga/tcsds-1.2.35/lib64 + extra_rpaths: [] +- compiler: + spec: fj@4.7.0 + modules: [] + paths: + cc: /opt/FJSVxtclanga/tcsds-1.2.34/bin/fcc + cxx: /opt/FJSVxtclanga/tcsds-1.2.34/bin/FCC + f77: /opt/FJSVxtclanga/tcsds-1.2.34/bin/frt + fc: /opt/FJSVxtclanga/tcsds-1.2.34/bin/frt + flags: {} + operating_system: rhel8 + target: aarch64 + environment: + set: + fcc_ENV: -Nclang + FCC_ENV: -Nclang + prepend_path: + PATH: /opt/FJSVxtclanga/tcsds-1.2.34/bin + LD_LIBRARY_PATH: /opt/FJSVxtclanga/tcsds-1.2.34/lib64 + extra_rpaths: [] +- compiler: + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + operating_system: rhel8 + target: aarch64 + modules: [] + environment: + unset: [] + extra_rpaths: [] + flags: {} + spec: gcc@8.5.0 +- compiler: + paths: + cc: /usr/bin/gcc + cxx: /usr/bin/g++ + f77: /usr/bin/gfortran + fc: /usr/bin/gfortran + operating_system: rhel8 + target: x86_64 + modules: [] + environment: + unset: [] + extra_rpaths: [] + flags: {} + spec: gcc@8.4.1 +- compiler: + spec: gcc@12.2.0 + paths: + cc: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-skylake_avx512/gcc-8.4.1/gcc-12.2.0-bjidm56oz6mejysl3wvjr3mbn7sqk3ec/bin/gcc + cxx: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-skylake_avx512/gcc-8.4.1/gcc-12.2.0-bjidm56oz6mejysl3wvjr3mbn7sqk3ec/bin//g++ + f77: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-skylake_avx512/gcc-8.4.1/gcc-12.2.0-bjidm56oz6mejysl3wvjr3mbn7sqk3ec/bin/gfortran + fc: 
/vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-skylake_avx512/gcc-8.4.1/gcc-12.2.0-bjidm56oz6mejysl3wvjr3mbn7sqk3ec/bin/gfortran + flags: {} + operating_system: rhel8 + target: x86_64 + modules: [] + environment: {} + extra_rpaths: [] +- compiler: + spec: gcc@12.2.0 + paths: + cc: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-a64fx/gcc-8.5.0/gcc-12.2.0-sxcx7kmt3qiktffgzzvrj2wmup3g32bc/bin/gcc + cxx: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-a64fx/gcc-8.5.0/gcc-12.2.0-sxcx7kmt3qiktffgzzvrj2wmup3g32bc/bin/g++ + f77: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-a64fx/gcc-8.5.0/gcc-12.2.0-sxcx7kmt3qiktffgzzvrj2wmup3g32bc/bin/gfortran + fc: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-a64fx/gcc-8.5.0/gcc-12.2.0-sxcx7kmt3qiktffgzzvrj2wmup3g32bc/bin/gfortran + flags: {} + operating_system: rhel8 + target: aarch64 + modules: [] + environment: {} + extra_rpaths: [] diff --git a/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/auxiliary_software_files/packages.yaml b/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/auxiliary_software_files/packages.yaml new file mode 100644 index 000000000..591b98ebc --- /dev/null +++ b/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/auxiliary_software_files/packages.yaml @@ -0,0 +1,591 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +packages: + all: + compiler: [fj, gcc] + providers: + mpi: [fujitsu-mpi, openmpi, mpich] + blas: [fujitsu-ssl2, openblas] + lapack: [fujitsu-ssl2, openblas] + scalapack: [fujitsu-ssl2, netlib-scalapack] + fftw-api: [fujitsu-fftw, fftw, rist-fftw] + permissions: + write: group + htslib: + version: [1.12] + python: + externals: + - spec: "python@3.10.8%fj +ssl arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-a64fx/fj-4.8.1/python-3.10.8-7q66snjvhvy7im57hncbgpirmddrb5sk + - spec: "python@3.10.8%fj +ssl+tkinter arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-a64fx/fj-4.8.1/python-3.10.8-5q3ncyl2my7oomopsmukduqo36u6pnkg + - spec: "python@3.10.8%gcc +ssl arch=linux-rhel8-cascadelake" + prefix: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-cascadelake/gcc-12.2.0/python-3.10.8-yt6afcnywa36aebxovs3ldscknyzlva3 + - spec: "python@3.10.8%gcc +ssl+tkinter arch=linux-rhel8-cascadelake" + prefix: /vol0004/apps/oss/spack-v0.19/opt/spack/linux-rhel8-cascadelake/gcc-12.2.0/python-3.10.8-a5u7ucknpzdipu2dd3d7hf2ybkdyprly + openssh: + permissions: + write: user + fujitsu-mpi: + version: [head, 4.8.1, 4.8.0, 4.7.0] + externals: + - spec: "fujitsu-mpi@head%fj arch=linux-rhel8-a64fx" + prefix: /opt/FJSVxtclanga/tcsds-mpi-latest + - spec: "fujitsu-mpi@4.8.1%fj arch=linux-rhel8-a64fx" + prefix: /opt/FJSVxtclanga/tcsds-mpi-1.2.36 + - spec: "fujitsu-mpi@4.8.0%fj arch=linux-rhel8-a64fx" + prefix: /opt/FJSVxtclanga/tcsds-mpi-1.2.35 + - spec: "fujitsu-mpi@4.7.0%fj arch=linux-rhel8-a64fx" + prefix: /opt/FJSVxtclanga/tcsds-1.2.34 + - spec: "fujitsu-mpi@head%gcc@12.2.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/oss/mpigcc/fjmpi-gcc12 + - spec: "fujitsu-mpi@head%gcc@10.4.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/oss/mpigcc/fjmpi-gcc10 + - spec: "fujitsu-mpi@head%gcc@8.5.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/oss/mpigcc/fjmpi-gcc8 + buildable: False + 
fujitsu-ssl2: + version: [head, 4.8.1, 4.8.0, 4.7.0] + externals: + - spec: "fujitsu-ssl2@head%fj arch=linux-rhel8-a64fx" + prefix: /opt/FJSVxtclanga/tcsds-ssl2-latest + - spec: "fujitsu-ssl2@4.8.1%fj arch=linux-rhel8-a64fx" + prefix: /opt/FJSVxtclanga/tcsds-ssl2-1.2.36 + - spec: "fujitsu-ssl2@4.8.0%fj arch=linux-rhel8-a64fx" + prefix: /opt/FJSVxtclanga/tcsds-ssl2-1.2.35 + - spec: "fujitsu-ssl2@4.7.0%fj arch=linux-rhel8-a64fx" + prefix: /opt/FJSVxtclanga/tcsds-1.2.34 + buildable: False + rist-fftw: + externals: + - spec: "rist-fftw@3.3.9-272-g63d6bd70 arch=linux-rhel8-a64fx" + prefix: /vol0004/share/rist/fftw/gcc-10.3.0/3.3.9-272-g63d6bd70 + buildable: False + ntchem: + externals: + - spec: "ntchem@13.0.0%fj@4.8.1 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/ntchem/aarch64/NTChem2013/13.0.0/tcsds-1.2.36/release_mpiomp + - spec: "ntchem@12.2.0%fj arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/ntchem/aarch64/NTChem2013/12.2.0/tcsds-1.2.34/release_mpiomp + buildable: False + abinitmp: + externals: + - spec: "abinitmp@1-22%fj@4.8.1 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/SPACK-Feb2023-ABINIT-MP-VER1-REV22 + - spec: "abinitmp@2-4%fj@4.8.1 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/SPACK-Feb2023-ABINIT-MP-VER2-REV4 + - spec: "abinitmp@2-8%fj@4.8.1 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/SPACK-Jan2024-ABINIT-MP-VER2-REV8 + buildable: False + fds: + externals: + - spec: "fds@6.7.7%fj@4.8.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/FDS/fds-FDS6.7.7/Build/mpi_fugaku + - spec: "fds@6.7.9%fj@4.8.1 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/FDS/fds-FDS6.7.9/Build/mpi_fugaku + buildable: False + ffvhc-ace: + externals: + - spec: "ffvhc-ace@0.1%fj@4.8.1 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/FFVHC-ACE/0.1/bin + buildable: False + quantum-espresso: + externals: + - spec: "quantum-espresso@6.5%fj@4.10.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/qe-6.5/bin + - spec: "quantum-espresso@6.6%fj@4.10.0 
arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/qe-6.6/bin + - spec: "quantum-espresso@6.7%fj@4.10.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/qe-6.7/bin + - spec: "quantum-espresso@6.8%fj@4.10.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/qe-6.8/bin + - spec: "quantum-espresso@7.0%fj@4.10.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/qe-7.0/bin + - spec: "quantum-espresso@7.1%fj@4.10.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/qe-7.1/bin + - spec: "quantum-espresso@7.2%fj@4.10.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/qe-7.2/bin + - spec: "quantum-espresso@7.3%fj@4.10.0 arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/opt/qe-7.3/bin + buildable: False + autoconf: + externals: + - spec: "autoconf@2.69 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "autoconf@2.69 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "autoconf@2.69 arch=linux-rhel8-skylake_avx512" + prefix: /usr + automake: + externals: + - spec: "automake@1.16.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "automake@1.16.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "automake@1.16.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + binutils: + externals: + - spec: "binutils@2.30 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "binutils@2.30 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "binutils@2.30 arch=linux-rhel8-skylake_avx512" + prefix: /usr + bzip2: + externals: + - spec: "bzip2@1.0.6 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "bzip2@1.0.6 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "bzip2@1.0.6 arch=linux-rhel8-skylake_avx512" + prefix: /usr + curl: + externals: + - spec: "curl@7.61.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "curl@7.61.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "curl@7.61.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + dbus: + externals: + - spec: "dbus@1.12.8 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "dbus@1.12.8 arch=linux-rhel8-cascadelake" + 
prefix: /usr + - spec: "dbus@1.12.8 arch=linux-rhel8-skylake_avx512" + prefix: /usr + elfutils: + externals: + - spec: "elfutils@0.186 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "elfutils@0.182 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "elfutils@0.182 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "elfutils@0.182 arch=linux-rhel8-skylake_avx512" + prefix: /usr + expat: + externals: + - spec: "expat@2.2.5 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "expat@2.2.5 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "expat@2.2.5 arch=linux-rhel8-skylake_avx512" + prefix: /usr + findutils: + externals: + - spec: "findutils@4.6.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "findutils@4.6.0 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "findutils@4.6.0 arch=linux-rhel8-skylake_avx512" + prefix: /usr + fontconfig: + externals: + - spec: "fontconfig@2.13.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "fontconfig@2.13.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "fontconfig@2.13.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + freetype: + externals: + - spec: "freetype@2.9.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "freetype@2.9.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "freetype@2.9.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + gmake: + externals: + - spec: "gmake@4.2.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "gmake@4.2.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "gmake@4.2.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + gdbm: + externals: + - spec: "gdbm@1.18 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "gdbm@1.18 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "gdbm@1.18 arch=linux-rhel8-skylake_avx512" + prefix: /usr + gettext: + externals: + - spec: "gettext@0.19.8.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "gettext@0.19.8.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "gettext@0.19.8.1 arch=linux-rhel8-skylake_avx512" + 
prefix: /usr + gmp: + externals: + - spec: "gmp@6.1.2 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "gmp@6.1.2 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "gmp@6.1.2 arch=linux-rhel8-skylake_avx512" + prefix: /usr + gnutls: + externals: + - spec: "gnutls@3.6.16 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "gnutls@3.6.14 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "gnutls@3.6.14 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "gnutls@3.6.14 arch=linux-rhel8-skylake_avx512" + prefix: /usr + hwloc: + externals: + - spec: "hwloc@2.2.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "hwloc@2.2.0 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "hwloc@2.2.0 arch=linux-rhel8-skylake_avx512" + prefix: /usr + jansson: + externals: + - spec: "jansson@2.14 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "jansson@2.11 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "jansson@2.11 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libaio: + externals: + - spec: "libaio@0.3.112 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libaio@0.3.112 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libaio@0.3.112 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libcap: + externals: + - spec: "libcap@2.48 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libcap@2.26 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libcap@2.26 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libdrm: + externals: + - spec: "libdrm@2.4.108 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libdrm@2.4.103 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libdrm@2.4.103 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libdrm@2.4.103 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libedit: + externals: + - spec: "libedit@3.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libedit@3.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libedit@3.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libevent: + externals: + - spec: 
"libevent@2.1.8 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libevent@2.1.8 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libevent@2.1.8 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libfabric: + externals: + - spec: "libfabric@1.14.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libfabric@1.11.2 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libfabric@1.11.2 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libffi: + externals: + - spec: "libffi@3.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libffi@3.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libffi@3.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libglvnd: + externals: + - spec: "libglvnd@1.3.4 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libglvnd@1.3.2 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libglvnd@1.3.2 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libibumad: + externals: + - spec: "libibumad@37.2 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libibumad@32.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libibumad@54mlnx1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libibumad@54mlnx1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libpciaccess: + externals: + - spec: "libpciaccess@0.14 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libpciaccess@0.14 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libpciaccess@0.14 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libpng: + externals: + - spec: "libpng@1.6.34 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libpng@1.6.34 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libpng@1.6.34 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libtasn1: + externals: + - spec: "libtasn1@4.13 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libtasn1@4.13 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libtasn1@4.13 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libtirpc: + externals: + - spec: "libtirpc@1.1.4 arch=linux-rhel8-a64fx" + prefix: 
/usr + - spec: "libtirpc@1.1.4 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libtirpc@1.1.4 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libtool: + externals: + - spec: "libtool@2.4.6 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libtool@2.4.6 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libtool@2.4.6 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libuuid: + externals: + - spec: "libuuid@2.32.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libuuid@2.32.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libuuid@2.32.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libxcb: + externals: + - spec: "libxcb@1.13.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libxcb@1.13.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libxcb@1.13.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libxkbcommon: + externals: + - spec: "libxkbcommon@0.9.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libxkbcommon@0.9.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libxkbcommon@0.9.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + libxml2: + externals: + - spec: "libxml2@2.9.7 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "libxml2@2.9.7 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "libxml2@2.9.7 arch=linux-rhel8-skylake_avx512" + prefix: /usr + lz4: + externals: + - spec: "lz4@1.8.3 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "lz4@1.8.3 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "lz4@1.8.3 arch=linux-rhel8-skylake_avx512" + prefix: /usr + m4: + externals: + - spec: "m4@1.4.18 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "m4@1.4.18 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "m4@1.4.18 arch=linux-rhel8-skylake_avx512" + prefix: /usr + ncurses: + externals: + - spec: "ncurses@6.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "ncurses@6.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "ncurses@6.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + nettle: + 
externals: + - spec: "nettle@3.4.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "nettle@3.4.1 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "nettle@3.4.1 arch=linux-rhel8-skylake_avx512" + prefix: /usr + nspr: + externals: + - spec: "nspr@4.32.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "nspr@4.25.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "nspr@4.25.0 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "nspr@4.25.0 arch=linux-rhel8-skylake_avx512" + prefix: /usr + numactl: + externals: + - spec: "numactl@2.0.12 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "numactl@2.0.12 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "numactl@2.0.12 arch=linux-rhel8-skylake_avx512" + prefix: /usr + opengl: + buildable: False + externals: + - spec: opengl@4.5.0 + prefix: /usr + openssl: + buildable: False + externals: + - spec: "openssl@1.1.1k arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "openssl@1.1.1g arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "openssl@1.1.1k arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "openssl@1.1.1k arch=linux-rhel8-skylake_avx512" + prefix: /usr + papi: + externals: + - spec: "papi@5.6.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "papi@5.6.0 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "papi@5.6.0 arch=linux-rhel8-skylake_avx512" + prefix: /usr + pcre: + externals: + - spec: "pcre@8.42 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "pcre@8.42 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "pcre@8.42 arch=linux-rhel8-skylake_avx512" + prefix: /usr + perl: + externals: + - spec: "perl@5.26.3 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "perl@5.26.3 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "perl@5.26.3 arch=linux-rhel8-skylake_avx512" + prefix: /usr + pkgconf: + externals: + - spec: "pkgconf@1.4.2 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "pkgconf@1.4.2 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "pkgconf@1.4.2 
arch=linux-rhel8-skylake_avx512" + prefix: /usr + popt: + externals: + - spec: "popt@1.18 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "popt@1.18 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "popt@1.18 arch=linux-rhel8-skylake_avx512" + prefix: /usr + readline: + externals: + - spec: "readline@7.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "readline@7.0 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "readline@7.0 arch=linux-rhel8-skylake_avx512" + prefix: /usr + sqlite: + externals: + - spec: "sqlite@3.26.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "sqlite@3.26.0 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "sqlite@3.26.0 arch=linux-rhel8-skylake_avx512" + prefix: /usr + tcl: + externals: + - spec: "tcl@8.6.8 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "tcl@8.6.8 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "tcl@8.6.8 arch=linux-rhel8-skylake_avx512" + prefix: /usr + ucx: + externals: + - spec: "ucx@1.11.2 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "ucx@1.9.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "ucx@1.11.0 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "ucx@1.11.0 arch=linux-rhel8-skylake_avx512" + prefix: /usr + valgrind: + externals: + - spec: "valgrind@3.18.1 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "valgrind@3.16.0 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "valgrind@3.16.0 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "valgrind@3.16.0 arch=linux-rhel8-skylake_avx512" + prefix: /usr + xz: + externals: + - spec: "xz@5.2.4 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "xz@5.2.4 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "xz@5.2.4 arch=linux-rhel8-skylake_avx512" + prefix: /usr + zlib: + externals: + - spec: "zlib@1.2.11 arch=linux-rhel8-a64fx" + prefix: /usr + - spec: "zlib@1.2.11 arch=linux-rhel8-cascadelake" + prefix: /usr + - spec: "zlib@1.2.11 arch=linux-rhel8-skylake_avx512" + prefix: /usr + buildable: False + # pmlib: had problems 
with spack. so far binary packages only. 2023/3/20 mikami + pmlib: + externals: + - spec: "pmlib@9.0-clang-precise arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/oss/pmlib-v9.0/9.0-clang-precise + - spec: "pmlib@9.0-clang-power arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/oss/pmlib-v9.0/9.0-clang-power + - spec: "pmlib@9.0-trad-power arch=linux-rhel8-a64fx" + prefix: /vol0004/apps/oss/pmlib-v9.0/9.0-trad-power + buildable: False + cmake: + externals: + - spec: "cmake@3.20.2 arch=linux-rhel8-a64fx" + prefix: /usr + diff --git a/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/spack.yaml b/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/spack.yaml new file mode 100644 index 000000000..81facd979 --- /dev/null +++ b/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/spack.yaml @@ -0,0 +1,25 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +spack: + packages: + default-compiler: + spack_spec: fj@{default_fj_version} + default-mpi: + spack_spec: fujitsu-mpi@{default_fj_version}%fj arch=linux-rhel8-a64fx + compiler-gcc: + spack_spec: gcc@{default_gnu_version} + compiler-fujitsu: + spack_spec: fj@{default_fj_version} + blas: + spack_spec: fujitsu-ssl2@head%fj arch=linux-rhel8-a64fx + lapack: + spack_spec: fujitsu-ssl2@head%fj arch=linux-rhel8-a64fx + mpi-gcc: + spack_spec: fujitsu-mpi@head%gcc@{default_gnu_version} arch=linux-rhel8-a64fx + mpi-fujitsu: + spack_spec: fujitsu-mpi@{default_fj_version}%fj arch=linux-rhel8-a64fx + gmake: + spack_spec: gmake@4.2.1 arch=linux-rhel8-a64fx diff --git a/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/system_definition.yaml b/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/system_definition.yaml new file mode 100644 index 000000000..afdbd6d63 --- /dev/null +++ b/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/system_definition.yaml @@ -0,0 +1,32 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark 
Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +system_definition: + name: Fugaku + site: RIKEN Center for Computational Science + system: Fujitsu-A64FX-TofuD + integrator: + vendor: Fujitsu + name: FX1000 + processor: + vendor: Fujitsu + name: A64FX + ISA: Armv8.2-A-SVE + uArch: aarch64 + accelerator: + vendor: + name: + ISA: + uArch: + interconnect: + vendor: Fujitsu + name: TofuInterconnectD + system-tested: + site: R-CCS + name: Fugaku + installation-year: 2020 + description: top500 + top500-system-instances: + - Fugaku (R-CCS) \ No newline at end of file diff --git a/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/variables.yaml b/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/variables.yaml new file mode 100644 index 000000000..b4db683c2 --- /dev/null +++ b/configs/RCCS-Fugaku-Fujitsu-A64FX-TofuD/variables.yaml @@ -0,0 +1,14 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +variables: + batch_time: '02:00' + mpi_command: 'mpiexec' + batch_submit: 'pjsub {execute_experiment}' + batch_nodes: '#PJM -L "node={n_nodes}"' + batch_ranks: '#PJM --mpi proc={n_ranks}' + batch_timeout: '#PJM -L "elapse={batch_time}:00" -x PJM_LLIO_GFSCACHE="/vol0001:/vol0002:/vol0003:/vol0004:/vol0005:/vol0006"' + default_fj_version: '4.8.1' + default_gnu_version: '12.2.0' diff --git a/docs/add-a-site-specific-system-config.rst b/docs/add-a-site-specific-system-config.rst index d4d3b8a1a..317c9b5a6 100644 --- a/docs/add-a-site-specific-system-config.rst +++ b/docs/add-a-site-specific-system-config.rst @@ -3,9 +3,9 @@ SPDX-License-Identifier: Apache-2.0 -============================= +====================================== Adding a Specific System Configuration -============================= +====================================== For a specific system, one can (optionally) add more information about the software installed on the system by adding Spack config files in ``benchpark/configs/$SITE/SYSTEMNAME-GENERICSYSTEM/auxiliary_software_files/``. diff --git a/docs/add-a-system-config.rst b/docs/add-a-system-config.rst index ecd4f9dbd..c975001d5 100644 --- a/docs/add-a-system-config.rst +++ b/docs/add-a-system-config.rst @@ -111,4 +111,4 @@ spack section in the `Ramble configuration file batch_timeout: '' If defining a specific system, one can be more specific with available software versions -and packages, as demonstrated in :doc:`add-a-specific-system-config.rst`. +and packages, as demonstrated in :doc:`add-a-site-specific-system-config`. 
diff --git a/docs/tables/current-system-definitions.csv b/docs/tables/current-system-definitions.csv index f4972971b..38c574238 100644 --- a/docs/tables/current-system-definitions.csv +++ b/docs/tables/current-system-definitions.csv @@ -1,20 +1,20 @@ -,Magma,Sierra,Tioga,AWS_PCluster_Hpc7a-zen4-EFA,HPECray-zen3-MI250X-Slingshot,x86_64 -**site**,LLNL,LLNL,LLNL,,, -**system**,Penguin-icelake-OmniPath,IBM-power9-V100-Infiniband,HPECray-zen3-MI250X-Slingshot,AWS_PCluster_Hpc7a-zen4-EFA,HPECray-zen3-MI250X-Slingshot,x86_64 -**integrator.vendor**,PenguinComputing,IBM,HPECray,AWS,HPECray, -**integrator.name**,RelionCluster,AC922,EX235a,ParallelCluster3.7.2-Hpc7a,EX235a, -**processor.vendor**,Intel,IBM,AMD,AMD,AMD, -**processor.name**,XeonPlatinum924248C,POWER9,EPYC-Zen3,EPYC-Zen4,EPYC-Zen3, -**processor.ISA**,x86_64,ppc64le,x86_64,x86_64,x86_64,x86_64 -**processor.uArch**,icelake,power9,zen3,zen4,zen3, -**accelerator.vendor**,,NVIDIA,AMD,,AMD, -**accelerator.name**,,V100,MI250X,,MI250X, -**accelerator.ISA**,,PTX,GCN,,GCN, -**accelerator.uArch**,,sm_70,gfx90a,,gfx90a, -**interconnect.vendor**,Intel,Mellanox,HPECray,AWS,HPECray, -**interconnect.name**,OmniPath,EDR-Infiniband,Slingshot11,EFA,Slingshot11, -**system-tested.site**,LLNL,LLNL,LLNL,AWS,LLNL, -**system-tested.name**,magma,lassen,tioga,,tioga, -**system-tested.installation-year**,2019,2018,2022,,2022, -**system-tested.description**,`top500 `_,`top500 `_,`top500 `_,`aws/hpc7a `_,`top500 `_, -**top500-system-instances**,Magma (LLNL),Sierra (LLNL),"Frontier (ORNL), Lumi (CSC), Tioga (LLNL)",,"Frontier (ORNL), Lumi (CSC), Tioga (LLNL)", +,Sierra,AWS_PCluster_Hpc7a-zen4-EFA,HPECray-zen3-MI250X-Slingshot,Magma,Piz Daint,Fugaku,Pascal,Eiger,LUMI,Tioga,x86_64 +**site**,LLNL,,,LLNL,CSCS,RIKEN Center for Computational Science,LLNL,CSCS,CSC,LLNL, 
+**system**,IBM-power9-V100-Infiniband,AWS_PCluster_Hpc7a-zen4-EFA,HPECray-zen3-MI250X-Slingshot,Penguin-icelake-OmniPath,HPECray-haswell-P100-Infiniband,Fujitsu-A64FX-TofuD,Penguin-broadwell-P100-OmniPath,HPECray-zen2-Slingshot,HPECray-zen3-MI250X-Slingshot,HPECray-zen3-MI250X-Slingshot,x86_64 +**integrator.vendor**,IBM,AWS,HPECray,PenguinComputing,HPECray,Fujitsu,Penguin,HPECray,HPECray,HPECray, +**integrator.name**,AC922,ParallelCluster3.7.2-Hpc7a,EX235a,RelionCluster,,FX1000,,,EX235a,EX235a, +**processor.vendor**,IBM,AMD,AMD,Intel,Intel,Fujitsu,Intel,AMD,AMD,AMD, +**processor.name**,POWER9,EPYC-Zen4,EPYC-Zen3,XeonPlatinum924248C,Xeon-E5-2650v3,A64FX,Xeon_E5-2695_v4,EPYC-7742,EPYC-Zen3,EPYC-Zen3, +**processor.ISA**,ppc64le,x86_64,x86_64,x86_64,x86_64,Armv8.2-A-SVE,x86_64,x86_64,x86_64,x86_64,x86_64 +**processor.uArch**,power9,zen4,zen3,icelake,haswell,aarch64,broadwell,zen2,zen3,zen3, +**accelerator.vendor**,NVIDIA,,AMD,,NVIDIA,,NVIDIA,,AMD,AMD, +**accelerator.name**,V100,,MI250X,,P100,,P100,,MI250X,MI250X, +**accelerator.ISA**,PTX,,GCN,,PTX,,PTX,,GCN,GCN, +**accelerator.uArch**,sm_70,,gfx90a,,sm_60,,sm_60,,gfx90a,gfx90a, +**interconnect.vendor**,Mellanox,AWS,HPECray,Intel,HPECray,Fujitsu,Cornelis,HPECray,HPECray,HPECray, +**interconnect.name**,EDR-Infiniband,EFA,Slingshot11,OmniPath,Aries,TofuInterconnectD,OmniPath,Slingshot,Slingshot11,Slingshot11, +**system-tested.site**,LLNL,AWS,LLNL,LLNL,CSCS,R-CCS,LLNL,CSCS,CSC,LLNL, +**system-tested.name**,lassen,,tioga,magma,daint,Fugaku,pascal,daint,LUMI,tioga, +**system-tested.installation-year**,2018,,2022,2019,2017,2020,2018,2017,2023,2022, +**system-tested.description**,`top500 `_,`aws/hpc7a `_,`top500 `_,`top500 `_,`top500 `_,`top500 `_,,`top500 `_,`top500 `_,`top500 `_, +**top500-system-instances**,Sierra (LLNL),,"Frontier (ORNL), LUMI (CSC), Tioga (LLNL)",Magma (LLNL),Piz Daint (CSCS),Fugaku (R-CCS),,,"Frontier (ORNL), LUMI (CSC), Tioga (LLNL)","Frontier (ORNL), LUMI (CSC), Tioga (LLNL)", diff --git 
a/experiments/amg2023/cuda/execute_experiment.tpl b/experiments/amg2023/cuda/execute_experiment.tpl index 1343ccb0c..8815e9c4f 100755 --- a/experiments/amg2023/cuda/execute_experiment.tpl +++ b/experiments/amg2023/cuda/execute_experiment.tpl @@ -9,8 +9,4 @@ cd {experiment_run_dir} -{spack_setup} - -{experiment_setup} #TODO: Add experiment specific code here - {command} diff --git a/experiments/amg2023/cuda/ramble.yaml b/experiments/amg2023/cuda/ramble.yaml index 741754c2d..0b9587f5b 100644 --- a/experiments/amg2023/cuda/ramble.yaml +++ b/experiments/amg2023/cuda/ramble.yaml @@ -4,11 +4,17 @@ # SPDX-License-Identifier: Apache-2.0 ramble: + include: + - ./configs/spack.yaml + - ./configs/variables.yaml + - ./configs/modifier.yaml + config: deprecated: true spack_flags: install: '--add --keep-stage' concretize: '-U -f' + applications: amg2023: workloads: @@ -23,25 +29,14 @@ ramble: nx: '{n}' ny: '{n}' nz: '{n}' - experiment_setup: '' gtl: ['gtl', 'nogtl'] gtlflag: ['-M"-gpu"', ''] experiments: - # because MPS only works for cuda<=v10 - amg2023_cuda_10_problem1_{gtl}_{n_nodes}_{px}_{py}_{pz}_{nx}_{ny}_{nz}: - variables: - experiment_setup: '{enable_mps}' - env_name: amg2023-cuda-10 - processes_per_node: '8' - n_nodes: '1' - matrices: - - size: - - n - amg2023_cuda_default_problem1_{gtl}_{n_nodes}_{px}_{py}_{pz}_{nx}_{ny}_{nz}: + amg2023_cuda_problem1_{gtl}_{n_nodes}_{px}_{py}_{pz}_{nx}_{ny}_{nz}: variables: - env_name: amg2023-cuda-default - processes_per_node: ['4', '2'] - n_nodes: ['2', '4'] + env_name: amg2023 + processes_per_node: '4' + n_nodes: '2' zips: gtl_info: - gtl @@ -52,22 +47,19 @@ ramble: - gtl_info spack: concretized: true - variables: - cuda_version: ['{cuda10_version}', '{default_cuda_version}'] - cuda_compiler: ['compiler-cuda10', 'default-compiler'] - versions: ['10', 'default'] packages: - hypre-cuda-{versions}: - spack_spec: 'hypre@2.28.0 +mpi+cuda+mixedint cuda_arch=={cuda_arch} ^cuda@{cuda_version}' - compiler: '{cuda_compiler}' - 
amg2023-cuda-{versions}: - spack_spec: 'amg2023@develop +mpi+cuda ^cuda@{cuda_version}' - compiler: '{cuda_compiler}' + hypre: + spack_spec: hypre@2.28.0 +mpi+cuda+mixedint{modifier_spack_variant} cuda_arch=={cuda_arch} ^cuda@{default_cuda_version} + compiler: default-compiler + amg2023: + spack_spec: amg2023@develop +mpi+cuda{modifier_spack_variant} cuda_arch=={cuda_arch} ^cuda@{default_cuda_version} + compiler: default-compiler environments: - amg2023-cuda-{versions}: + amg2023: packages: - - cublas-cuda-{versions} + - cublas-cuda - lapack - default-mpi - - hypre-cuda-{versions} - - amg2023-cuda-{versions} + - hypre + - amg2023 + - '{modifier_package_name}' diff --git a/experiments/amg2023/openmp/execute_experiment.tpl b/experiments/amg2023/openmp/execute_experiment.tpl index b803b4898..89e73cf49 100755 --- a/experiments/amg2023/openmp/execute_experiment.tpl +++ b/experiments/amg2023/openmp/execute_experiment.tpl @@ -10,8 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - -{experiment_setup} - {command} diff --git a/experiments/amg2023/openmp/ramble.yaml b/experiments/amg2023/openmp/ramble.yaml index a6612e3d1..d397207ad 100644 --- a/experiments/amg2023/openmp/ramble.yaml +++ b/experiments/amg2023/openmp/ramble.yaml @@ -7,6 +7,7 @@ ramble: include: - ./configs/spack.yaml - ./configs/variables.yaml + - ./configs/modifier.yaml config: deprecated: true @@ -31,10 +32,9 @@ ramble: nx: '{n}' ny: '{n}' nz: '{n}' - experiment_setup: '' processes_per_node: ['8', '4'] n_nodes: ['1', '2'] - threads_per_node_core: ['8', '10', '13'] #TODO: Specify n_threads according to available n_nodes and n_ranks + threads_per_node_core: ['4', '6', '12'] omp_num_threads: '{threads_per_node_core} * {n_nodes}' experiments: amg2023_omp_problem1_{n_nodes}_{omp_num_threads}_{px}_{py}_{pz}_{nx}_{ny}_{nz}: @@ -42,16 +42,16 @@ ramble: env_name: amg2023-omp matrices: - size_threads: - - n # TODO: Filter matrix - - threads_per_node_core # TODO: Filter matrix + - n + - threads_per_node_core spack: 
concretized: true packages: hypre-omp: - spack_spec: hypre@2.28.0 +mpi+openmp+mixedint + spack_spec: hypre@2.28.0 +mpi+openmp+mixedint{modifier_spack_variant} compiler: default-compiler amg2023-omp: - spack_spec: amg2023@develop +mpi+openmp + spack_spec: amg2023@develop +mpi+openmp{modifier_spack_variant} compiler: default-compiler environments: amg2023-omp: @@ -60,3 +60,4 @@ ramble: - default-mpi - hypre-omp - amg2023-omp + - '{modifier_package_name}' diff --git a/experiments/amg2023/rocm/execute_experiment.tpl b/experiments/amg2023/rocm/execute_experiment.tpl index af9908524..89e73cf49 100755 --- a/experiments/amg2023/rocm/execute_experiment.tpl +++ b/experiments/amg2023/rocm/execute_experiment.tpl @@ -10,8 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - -{experiment_setup} #TODO: Add experiment specific code here - {command} diff --git a/experiments/amg2023/rocm/ramble.yaml b/experiments/amg2023/rocm/ramble.yaml index d1ec1dda2..6e0d19045 100644 --- a/experiments/amg2023/rocm/ramble.yaml +++ b/experiments/amg2023/rocm/ramble.yaml @@ -4,11 +4,17 @@ # SPDX-License-Identifier: Apache-2.0 ramble: + include: + - ./configs/spack.yaml + - ./configs/variables.yaml + - ./configs/modifier.yaml + config: deprecated: true spack_flags: install: '--add --keep-stage' concretize: '-U -f' + applications: amg2023: workloads: @@ -23,7 +29,6 @@ ramble: nx: '{n}' ny: '{n}' nz: '{n}' - experiment_setup: '' experiments: '{env_name}_problem1_{n_nodes}_{px}_{py}_{pz}_{nx}_{ny}_{nz}': variables: @@ -41,10 +46,10 @@ ramble: gtl: ["gtl", "no-gtl"] packages: hypre-{gtl}: - spack_spec: hypre@2.28.0 +mpi+rocm+mixedint amdgpu_target={rocm_arch} + spack_spec: hypre@2.28.0 +mpi+rocm+mixedint{modifier_spack_variant} amdgpu_target={rocm_arch} compiler: compiler-rocm amg2023-gpu-{gtl}: - spack_spec: amg2023@develop +mpi+rocm amdgpu_target={rocm_arch} + spack_spec: amg2023@develop +mpi+rocm{modifier_spack_variant} amdgpu_target={rocm_arch} compiler: compiler-rocm environments: amg2023-gpu-{gtl}: 
@@ -54,3 +59,4 @@ ramble: - mpi-rocm-{gtl} - hypre-{gtl} - amg2023-gpu-{gtl} + - '{modifier_package_name}' diff --git a/experiments/gromacs/cuda/execute_experiment.tpl b/experiments/gromacs/cuda/execute_experiment.tpl new file mode 100755 index 000000000..ab02968fe --- /dev/null +++ b/experiments/gromacs/cuda/execute_experiment.tpl @@ -0,0 +1,16 @@ +#!/bin/bash + +## Copyright 2023 Lawrence Livermore National Security, LLC and other +## Benchpark Project Developers. See the top-level COPYRIGHT file for details. +## +## SPDX-License-Identifier: Apache-2.0 + +{batch_nodes} +{batch_ranks} +{batch_timeout} + +cd {experiment_run_dir} + +{experiment_setup} + +{command} diff --git a/experiments/gromacs/cuda/ramble.yaml b/experiments/gromacs/cuda/ramble.yaml new file mode 100644 index 000000000..6591db87b --- /dev/null +++ b/experiments/gromacs/cuda/ramble.yaml @@ -0,0 +1,61 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +ramble: + config: + deprecated: true + spack_flags: + install: '--add --keep-stage' + concretize: '-U -f' + + applications: + gromacs: + workloads: + water_gmx50_adac: + env_vars: + set: + OMP_PROC_BIND: close + OMP_PLACES: cores + OMP_NUM_THREADS: '{omp_num_threads}' + variables: + experiment_setup: '' + n_ranks: '{processes_per_node} * {n_nodes}' + processes_per_node: '2' + n_nodes: '4' + omp_num_threads: '10' + target: 'gpu' + experiments: + gromacs_water_gmx50_adac_size{size}_dlb{dlb}_pin{pin}_target{target}_maxh{maxh}_nsteps{nsteps}_nstlist{nstlist}_npme{npme}: + variables: + dlb: 'no' + pin: 'off' + nb: '{target}' + pme: 'auto' + bonded: 'cpu' + update: '{target}' + maxh: '0.05' + nsteps: '1000' + nstlist: '200' + npme: '1' + size: '1536' + + spack: + concretized: true + packages: + gromacs: + spack_spec: gromacs@2023.3 +mpi+openmp+cuda~hwloc~double cuda_arch=={cuda_arch} + compiler: compiler-clang + cuda: + spack_spec: cuda@{default_cuda_version}+allow-unsupported-compilers + compiler: compiler-clang + environments: + gromacs: + packages: + - cuda + - blas + - lapack + - mpi-clang + - fftw + - gromacs diff --git a/experiments/gromacs/openmp/execute_experiment.tpl b/experiments/gromacs/openmp/execute_experiment.tpl new file mode 100755 index 000000000..ab02968fe --- /dev/null +++ b/experiments/gromacs/openmp/execute_experiment.tpl @@ -0,0 +1,16 @@ +#!/bin/bash + +## Copyright 2023 Lawrence Livermore National Security, LLC and other +## Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+## +## SPDX-License-Identifier: Apache-2.0 + +{batch_nodes} +{batch_ranks} +{batch_timeout} + +cd {experiment_run_dir} + +{experiment_setup} + +{command} diff --git a/experiments/gromacs/openmp/ramble.yaml b/experiments/gromacs/openmp/ramble.yaml new file mode 100644 index 000000000..166bb9620 --- /dev/null +++ b/experiments/gromacs/openmp/ramble.yaml @@ -0,0 +1,59 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +ramble: + config: + deprecated: true + spack_flags: + install: '--add --keep-stage' + concretize: '-U -f' + + applications: + gromacs: + workloads: + water_gmx50_adac: + env_vars: + set: + OMP_PROC_BIND: close + OMP_PLACES: cores + OMP_NUM_THREADS: '{omp_num_threads}' + variables: + experiment_setup: '' + n_ranks: '{processes_per_node} * {n_nodes}' + processes_per_node: '4' + n_nodes: '2' + omp_num_threads: '16' + target: 'cpu' + experiments: + gromacs_water_gmx50_adac_size{size}_dlb{dlb}_pin{pin}_target{target}_maxh{maxh}_nsteps{nsteps}_nstlist{nstlist}_npme{npme}: + variables: + dlb: 'no' + pin: 'off' + nb: '{target}' + pme: 'auto' + bonded: '{target}' + update: '{target}' + maxh: '0.05' + nsteps: '1000' + nstlist: '200' + npme: '0' + size: '1536' + + spack: + concretized: true + packages: + fftw: + spack_spec: fftw@3.3.10 +mpi+openmp + compiler: default-compiler + gromacs: + spack_spec: gromacs@main +mpi+openmp~hwloc + compiler: default-compiler + environments: + gromacs: + packages: + - lapack + - default-mpi + - fftw + - gromacs diff --git a/experiments/gromacs/rocm/execute_experiment.tpl b/experiments/gromacs/rocm/execute_experiment.tpl new file mode 100755 index 000000000..ab02968fe --- /dev/null +++ b/experiments/gromacs/rocm/execute_experiment.tpl @@ -0,0 +1,16 @@ +#!/bin/bash + +## Copyright 2023 Lawrence Livermore National Security, LLC and other +## Benchpark Project Developers. 
See the top-level COPYRIGHT file for details. +## +## SPDX-License-Identifier: Apache-2.0 + +{batch_nodes} +{batch_ranks} +{batch_timeout} + +cd {experiment_run_dir} + +{experiment_setup} + +{command} diff --git a/experiments/gromacs/rocm/ramble.yaml b/experiments/gromacs/rocm/ramble.yaml new file mode 100644 index 000000000..c1aa8368e --- /dev/null +++ b/experiments/gromacs/rocm/ramble.yaml @@ -0,0 +1,61 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +ramble: + config: + deprecated: true + spack_flags: + install: '--add --keep-stage' + concretize: '-U -f' + + applications: + gromacs: + workloads: + water_gmx50_adac: + env_vars: + set: + OMP_PROC_BIND: close + OMP_PLACES: cores + OMP_NUM_THREADS: '{omp_num_threads}' + variables: + experiment_setup: '' + n_ranks: '{processes_per_node} * {n_nodes}' + processes_per_node: '8' + n_nodes: '1' + omp_num_threads: '8' + target: 'gpu' + experiments: + gromacs_water_gmx50_adac_size{size}_dlb{dlb}_pin{pin}_target{target}_maxh{maxh}_nsteps{nsteps}_nstlist{nstlist}_npme{npme}: + variables: + dlb: 'no' + pin: 'off' + nb: '{target}' + pme: 'auto' + bonded: '{target}' + update: '{target}' + maxh: '0.05' + nsteps: '1000' + nstlist: '200' + npme: '0' + size: '1536' + + spack: + concretized: true + packages: + hipsycl: + spack_spec: hipsycl@23.10.0 + compiler: default-compiler + gromacs: + spack_spec: gromacs@2023.3 +mpi+openmp+rocm~hwloc~double amdgpu_target={rocm_arch} + compiler: default-compiler + environments: + gromacs: + packages: + - hipsycl + - blas-rocm + - lapack-rocm + - mpi-rocm-gtl + - fftw + - gromacs diff --git a/experiments/hpcc/mpi-only/execute_experiment.tpl b/experiments/hpcc/mpi-only/execute_experiment.tpl index b803b4898..89e73cf49 100755 --- a/experiments/hpcc/mpi-only/execute_experiment.tpl +++ b/experiments/hpcc/mpi-only/execute_experiment.tpl @@ -10,8 +10,4 @@ cd 
{experiment_run_dir} -{spack_setup} - -{experiment_setup} - {command} diff --git a/experiments/hpcc/mpi-only/ramble.yaml b/experiments/hpcc/mpi-only/ramble.yaml index 39a50a154..fc6ff6ed9 100644 --- a/experiments/hpcc/mpi-only/ramble.yaml +++ b/experiments/hpcc/mpi-only/ramble.yaml @@ -20,7 +20,6 @@ ramble: standard: variables: n_ranks: ['1', '2', '4', '8'] - experiment_setup: '' experiments: hpcc_standard_{n_nodes}_{n_ranks}: variables: diff --git a/experiments/hpcg/openmp/execute_experiment.tpl b/experiments/hpcg/openmp/execute_experiment.tpl index d24437712..89e73cf49 100755 --- a/experiments/hpcg/openmp/execute_experiment.tpl +++ b/experiments/hpcg/openmp/execute_experiment.tpl @@ -10,6 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - {command} diff --git a/experiments/hpl/openmp/execute_experiment.tpl b/experiments/hpl/openmp/execute_experiment.tpl index b803b4898..89e73cf49 100755 --- a/experiments/hpl/openmp/execute_experiment.tpl +++ b/experiments/hpl/openmp/execute_experiment.tpl @@ -10,8 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - -{experiment_setup} - {command} diff --git a/experiments/hpl/openmp/ramble.yaml b/experiments/hpl/openmp/ramble.yaml index 05462a12a..03083e4d9 100644 --- a/experiments/hpl/openmp/ramble.yaml +++ b/experiments/hpl/openmp/ramble.yaml @@ -30,7 +30,6 @@ ramble: Ns: 10000 N-NBs: 1 NBs: 128 - experiment_setup: '' processes_per_node: '8' n_nodes: '1' omp_num_threads: ['2', '4', '8'] diff --git a/experiments/lammps/openmp/execute_experiment.tpl b/experiments/lammps/openmp/execute_experiment.tpl index d24437712..89e73cf49 100644 --- a/experiments/lammps/openmp/execute_experiment.tpl +++ b/experiments/lammps/openmp/execute_experiment.tpl @@ -10,6 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - {command} diff --git a/experiments/lammps/openmp/ramble.yaml b/experiments/lammps/openmp/ramble.yaml index 5c1bd7890..416d5f462 100644 --- a/experiments/lammps/openmp/ramble.yaml +++ b/experiments/lammps/openmp/ramble.yaml @@ -34,7 +34,7 
@@ ramble: experiments: scaling_{n_nodes}nodes_{size_name}: variables: - env_name: lammps_omp + env_name: lammps processes_per_node: ['36'] zips: problems: @@ -48,12 +48,12 @@ ramble: spack: concretized: true packages: - lammps_omp: - spack_spec: lammps@20230802.1 +opt+manybody+molecule+kspace+rigid+openmp+openmp-package+asphere+dpd-basic+dpd-meso+dpd-react+dpd-smooth+reaxff + lammps: + spack_spec: lammps@20231121 +opt+manybody+molecule+kspace+rigid+openmp+openmp-package+asphere+dpd-basic+dpd-meso+dpd-react+dpd-smooth+reaxff compiler: default-compiler environments: - lammps_omp: + lammps: packages: - lapack - default-mpi - - lammps_omp + - lammps diff --git a/experiments/lbann/cuda/execute_experiment.tpl b/experiments/lbann/cuda/execute_experiment.tpl index f2026d0e2..1e2ea813e 100755 --- a/experiments/lbann/cuda/execute_experiment.tpl +++ b/experiments/lbann/cuda/execute_experiment.tpl @@ -10,8 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - -{experiment_setup} - {command} diff --git a/experiments/lbann/cuda/ramble.yaml b/experiments/lbann/cuda/ramble.yaml index 547a3cbdf..027481554 100644 --- a/experiments/lbann/cuda/ramble.yaml +++ b/experiments/lbann/cuda/ramble.yaml @@ -31,7 +31,6 @@ ramble: nx: '{n}' ny: '{n}' nz: '{n}' - experiment_setup: '' processes_per_node: ['8', '4'] n_nodes: ['1', '2'] threads_per_node_core: ['8', '10', '13'] #TODO: Specify n_threads according to available n_nodes and n_ranks diff --git a/experiments/lbann/rocm/execute_experiment.tpl b/experiments/lbann/rocm/execute_experiment.tpl index f2026d0e2..1e2ea813e 100755 --- a/experiments/lbann/rocm/execute_experiment.tpl +++ b/experiments/lbann/rocm/execute_experiment.tpl @@ -10,8 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - -{experiment_setup} - {command} diff --git a/experiments/md-test/mpi-only/execute_experiment.tpl b/experiments/md-test/mpi-only/execute_experiment.tpl index b803b4898..89e73cf49 100755 --- a/experiments/md-test/mpi-only/execute_experiment.tpl +++ 
b/experiments/md-test/mpi-only/execute_experiment.tpl @@ -10,8 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - -{experiment_setup} - {command} diff --git a/experiments/md-test/mpi-only/ramble.yaml b/experiments/md-test/mpi-only/ramble.yaml index dd6ac11a4..ef05eb7f6 100644 --- a/experiments/md-test/mpi-only/ramble.yaml +++ b/experiments/md-test/mpi-only/ramble.yaml @@ -21,7 +21,6 @@ ramble: variables: n_ranks: ['1', '2', '4', '8'] n_nodes: '1' - experiment_setup: '' experiments: mdtest_multifile_{num-objects}_{iterations}_{additional-args}_{n_nodes}_{n_ranks}: variables: diff --git a/experiments/osu-micro-benchmarks/mpi-only/execute_experiment.tpl b/experiments/osu-micro-benchmarks/mpi-only/execute_experiment.tpl index d24437712..89e73cf49 100644 --- a/experiments/osu-micro-benchmarks/mpi-only/execute_experiment.tpl +++ b/experiments/osu-micro-benchmarks/mpi-only/execute_experiment.tpl @@ -10,6 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - {command} diff --git a/experiments/raja-perf/cuda/execute_experiment.tpl b/experiments/raja-perf/cuda/execute_experiment.tpl index d24437712..89e73cf49 100644 --- a/experiments/raja-perf/cuda/execute_experiment.tpl +++ b/experiments/raja-perf/cuda/execute_experiment.tpl @@ -10,6 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - {command} diff --git a/experiments/raja-perf/mpi-only/execute_experiment.tpl b/experiments/raja-perf/mpi-only/execute_experiment.tpl index d24437712..89e73cf49 100644 --- a/experiments/raja-perf/mpi-only/execute_experiment.tpl +++ b/experiments/raja-perf/mpi-only/execute_experiment.tpl @@ -10,6 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - {command} diff --git a/experiments/raja-perf/openmp/execute_experiment.tpl b/experiments/raja-perf/openmp/execute_experiment.tpl index d24437712..89e73cf49 100644 --- a/experiments/raja-perf/openmp/execute_experiment.tpl +++ b/experiments/raja-perf/openmp/execute_experiment.tpl @@ -10,6 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - {command} diff --git 
a/experiments/raja-perf/rocm/execute_experiment.tpl b/experiments/raja-perf/rocm/execute_experiment.tpl index d24437712..89e73cf49 100644 --- a/experiments/raja-perf/rocm/execute_experiment.tpl +++ b/experiments/raja-perf/rocm/execute_experiment.tpl @@ -10,6 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - {command} diff --git a/experiments/saxpy/cuda/execute_experiment.tpl b/experiments/saxpy/cuda/execute_experiment.tpl index d24437712..8815e9c4f 100755 --- a/experiments/saxpy/cuda/execute_experiment.tpl +++ b/experiments/saxpy/cuda/execute_experiment.tpl @@ -5,11 +5,8 @@ # SPDX-License-Identifier: Apache-2.0 {batch_nodes} -{batch_ranks} {batch_timeout} cd {experiment_run_dir} -{spack_setup} - {command} diff --git a/experiments/saxpy/cuda/ramble.yaml b/experiments/saxpy/cuda/ramble.yaml index d9d157038..677a096f3 100644 --- a/experiments/saxpy/cuda/ramble.yaml +++ b/experiments/saxpy/cuda/ramble.yaml @@ -4,11 +4,17 @@ # SPDX-License-Identifier: Apache-2.0 ramble: + include: + - ./configs/spack.yaml + - ./configs/variables.yaml + - ./configs/modifier.yaml + config: deprecated: true spack_flags: install: '--add --keep-stage' concretize: '-U -f' + applications: saxpy: workloads: @@ -27,10 +33,11 @@ ramble: concretized: true packages: saxpy: - spack_spec: saxpy@1.0.0 +cuda cuda_arch=={cuda_arch} ^cuda@{default_cuda_version} + spack_spec: saxpy@1.0.0 +cuda{modifier_spack_variant} cuda_arch=={cuda_arch} ^cuda@{default_cuda_version} compiler: default-compiler environments: saxpy: packages: - default-mpi - saxpy + - '{modifier_package_name}' diff --git a/experiments/saxpy/openmp/execute_experiment.tpl b/experiments/saxpy/openmp/execute_experiment.tpl index d24437712..89e73cf49 100755 --- a/experiments/saxpy/openmp/execute_experiment.tpl +++ b/experiments/saxpy/openmp/execute_experiment.tpl @@ -10,6 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - {command} diff --git a/experiments/saxpy/openmp/ramble.yaml b/experiments/saxpy/openmp/ramble.yaml index 96e6efa04..cc7bdf7da 
100644 --- a/experiments/saxpy/openmp/ramble.yaml +++ b/experiments/saxpy/openmp/ramble.yaml @@ -7,6 +7,7 @@ ramble: include: - ./configs/spack.yaml - ./configs/variables.yaml + - ./configs/modifier.yaml config: deprecated: true @@ -39,10 +40,11 @@ ramble: concretized: true packages: saxpy: - spack_spec: saxpy@1.0.0 +openmp ^cmake@3.23.1 + spack_spec: saxpy@1.0.0 +openmp{modifier_spack_variant} ^cmake@3.23.1 compiler: default-compiler environments: saxpy: packages: - default-mpi - saxpy + - '{modifier_package_name}' diff --git a/experiments/saxpy/rocm/execute_experiment.tpl b/experiments/saxpy/rocm/execute_experiment.tpl index d24437712..89e73cf49 100755 --- a/experiments/saxpy/rocm/execute_experiment.tpl +++ b/experiments/saxpy/rocm/execute_experiment.tpl @@ -10,6 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - {command} diff --git a/experiments/saxpy/rocm/ramble.yaml b/experiments/saxpy/rocm/ramble.yaml index b5657b34d..dd80075d0 100644 --- a/experiments/saxpy/rocm/ramble.yaml +++ b/experiments/saxpy/rocm/ramble.yaml @@ -7,12 +7,14 @@ ramble: include: - ./configs/spack.yaml - ./configs/variables.yaml + - ./configs/modifier.yaml config: deprecated: true spack_flags: install: '--add --keep-stage' concretize: '-U -f' + applications: saxpy: workloads: @@ -31,10 +33,11 @@ ramble: concretized: true packages: saxpy: - spack_spec: saxpy@1.0.0 +rocm amdgpu_target={rocm_arch} + spack_spec: saxpy@1.0.0 +rocm{modifier_spack_variant} amdgpu_target={rocm_arch} compiler: default-compiler environments: saxpy: packages: - default-mpi - saxpy + - '{modifier_package_name}' diff --git a/experiments/stream/openmp/execute_experiment.tpl b/experiments/stream/openmp/execute_experiment.tpl index d24437712..89e73cf49 100755 --- a/experiments/stream/openmp/execute_experiment.tpl +++ b/experiments/stream/openmp/execute_experiment.tpl @@ -10,6 +10,4 @@ cd {experiment_run_dir} -{spack_setup} - {command} diff --git a/modifiers/caliper/configs/modifier.yaml 
b/modifiers/caliper/configs/modifier.yaml new file mode 100644 index 000000000..059bad60c --- /dev/null +++ b/modifiers/caliper/configs/modifier.yaml @@ -0,0 +1,17 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +variables: + modifier_package_name: 'caliper' + modifier_spack_variant: '+caliper' + +modifiers: + - name: caliper + mode: time + +spack: + packages: + caliper: + spack_spec: caliper+adiak+mpi~libunwind~libdw~papi diff --git a/modifiers/caliper/modifier.py b/modifiers/caliper/modifier.py new file mode 100644 index 000000000..012c6168d --- /dev/null +++ b/modifiers/caliper/modifier.py @@ -0,0 +1,33 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +from ramble.modkit import * + + +class Caliper(SpackModifier): + """Define a modifier for Caliper""" + + name = "caliper" + + tags("profiler", "performance-analysis") + + maintainers("pearce8") + + mode("time", description="Platform-independent collection of time") + + _cali_datafile = "{experiment_run_dir}/{experiment_name}.cali" + + env_var_modification( + "CALI_CONFIG", + "spot(output={})".format(_cali_datafile), + method="set", + modes=["time"], + ) + + archive_pattern(_cali_datafile) + + software_spec("caliper", spack_spec="caliper") + + required_package("caliper") diff --git a/modifiers/modifier_repo.yaml b/modifiers/modifier_repo.yaml new file mode 100644 index 000000000..23ed59a45 --- /dev/null +++ b/modifiers/modifier_repo.yaml @@ -0,0 +1,3 @@ +repo: + namespace: benchpark + subdirectory: '' diff --git a/modifiers/none/configs/modifier.yaml b/modifiers/none/configs/modifier.yaml new file mode 100644 index 000000000..280cb6db1 --- /dev/null +++ b/modifiers/none/configs/modifier.yaml @@ -0,0 +1,8 @@ +# Copyright 2023 
Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +variables: + modifier_package_name: '' + modifier_spack_variant: '' diff --git a/repo/amg2023/application.py b/repo/amg2023/application.py index 7713f9ab6..0c8cc3d80 100644 --- a/repo/amg2023/application.py +++ b/repo/amg2023/application.py @@ -53,5 +53,3 @@ class Amg2023(SpackApplication): #TODO: Fix the FOM success_criteria(...) success_criteria('pass', mode='string', match=r'Figure of Merit \(FOM\)', file='{experiment_run_dir}/{experiment_name}.out') - def evaluate_success(self): - return True diff --git a/repo/cusolver/package.py b/repo/cusolver/package.py new file mode 100644 index 000000000..b2b74dad0 --- /dev/null +++ b/repo/cusolver/package.py @@ -0,0 +1,11 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +from spack.package import * + + +class Cusolver(Package): + + provides("lapack") diff --git a/repo/gromacs/application.py b/repo/gromacs/application.py new file mode 100644 index 000000000..fa5c6895c --- /dev/null +++ b/repo/gromacs/application.py @@ -0,0 +1,215 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +import os +from ramble.appkit import * +from ramble.expander import Expander + + +class Gromacs(SpackApplication): + '''Define a Gromacs application''' + name = 'gromacs' + + maintainers('douglasjacobsen') + + tags('molecular-dynamics') + + default_compiler('gcc9', spack_spec='gcc@9.3.0') + software_spec('impi2018', spack_spec='intel-mpi@2018.4.274') + software_spec('gromacs', spack_spec='gromacs@2023.3', compiler='gcc12') + + executable('pre-process', 'gmx_mpi grompp ' + + '-f {input_path}/{type}.mdp ' + + '-c {input_path}/conf.gro ' + + '-p {input_path}/topol.top ' + + '-o exp_input.tpr', use_mpi=False) + executable('execute-nsteps-gen', 'gmx_mpi mdrun -notunepme ' + + '-v -resethway -noconfout -nsteps 4000 ' + + '-s exp_input.tpr', use_mpi=True) + executable('execute-nsteps', 'gmx_mpi mdrun -notunepme ' + + '-v -resethway -noconfout -nsteps 4000 ' + + '-s {input_path}', use_mpi=True) + executable('execute-adac', 'gmx_mpi mdrun ' + + '-resethway -noconfout -dlb {dlb} -pin {pin} -nb {nb} -pme {pme} -bonded {bonded} -update {update} ' + + '-maxh {maxh} -nsteps {nsteps} -notunepme -nstlist {nstlist} -npme {npme} ' + + '-v -s exp_input.tpr', use_mpi=True) + + input_file('water_gmx50_bare', url='https://ftp.gromacs.org/pub/benchmarks/water_GMX50_bare.tar.gz', + sha256='2219c10acb97787f80f6638132bad3ff2ca1e68600eef1bc8b89d9560e74c66a', + description='') + input_file('water_bare_hbonds', url='https://ftp.gromacs.org/pub/benchmarks/water_bare_hbonds.tar.gz', + sha256='b2e09d30f5c6b00ecf1c13ea6fa715ad132747863ef89f983f6c09a872cf2776', + description='') + input_file('lignocellulose', + url='https://repository.prace-ri.eu/ueabs/GROMACS/1.2/GROMACS_TestCaseB.tar.gz', + sha256='8a12db0232465e1d47c6a4eb89f615cdbbdc8fc360a86088b131331bd462f35c', + description='A model of cellulose and lignocellulosic biomass in an aqueous ' + + 'solution. This system of 3.3M atoms is inhomogeneous, at ' + + 'least with GROMACS 4.5. 
This system uses reaction-field' + + 'electrostatics instead of PME and therefore should scale well.') + input_file('HECBioSim', + url='https://github.com/victorusu/GROMACS_Benchmark_Suite/archive/refs/tags/1.0.0.tar.gz', + sha256='9cb2ad61ec2a422fc33578047e7cb2fd2c37ae9a75a6162d662fa2b711e9737f', + description='https://www.hecbiosim.ac.uk/access-hpc/benchmarks') + + input_file('BenchPEP', url='https://www.mpinat.mpg.de/benchPEP.zip', + sha256='f11745201dbb9e6a29a39cb016ee8123f6b0f519b250c94660f0a9623e497b22', + description='12M Atoms, Peptides in Water, 2fs time step, all bonds constrained. https://www.mpinat.mpg.de/grubmueller/bench') + + input_file('BenchPEP_h', url='https://www.mpinat.mpg.de/benchPEP-h.zip', + sha256='3ca8902fd9a6cf005b266f83b57217397b4ba4af987b97dc01e04185bd098bce', + description='12M Atoms, Peptides in Water, 2fs time step, h-bonds constrained. https://www.mpinat.mpg.de/grubmueller/bench') + + input_file('BenchMEM', url='https://www.mpinat.mpg.de/benchMEM.zip', + sha256='3c1c8cd4f274d532f48c4668e1490d389486850d6b3b258dfad4581aa11380a4', + description='82k atoms, protein in membrane surrounded by water, 2 fs time step. https://www.mpinat.mpg.de/grubmueller/bench') + + input_file('BenchRIB', url='https://www.mpinat.mpg.de/benchRIB.zip', + sha256='39acb014a79ed9a9ff2ad6294a2c09f9b85ea6986dfc204a3639814503eeb60a', + description='2 M atoms, ribosome in water, 4 fs time step. 
https://www.mpinat.mpg.de/grubmueller/bench') + + input_file('JCP_benchmarks', + url='https://zenodo.org/record/3893789/files/GROMACS_heterogeneous_parallelization_benchmark_info_and_systems_JCP.tar.gz?download=1', + sha256='82449291f44f4d5b7e5c192d688b57b7c2a2e267fe8b12e7a15b5d68f96c7b20', + description='GROMACS_heterogeneous_parallelization_benchmark_info_and_systems_JCP') + + workload('water_gmx50_adac', executables=['pre-process', 'execute-adac'], + input='water_gmx50_bare') + workload('water_gmx50', executables=['pre-process', 'execute-gen'], + input='water_gmx50_bare') + workload('water_bare', executables=['pre-process', 'execute-gen'], + input='water_bare_hbonds') + workload('lignocellulose', executables=['execute'], + input='lignocellulose') + workload('hecbiosim', executables=['execute'], + input='HECBioSim') + workload('benchpep', executables=['execute'], + input='BenchPEP') + workload('benchpep_h', executables=['execute'], + input='BenchPEP_h') + workload('benchmem', executables=['execute'], + input='BenchMEM') + workload('benchrib', executables=['execute'], + input='BenchRIB') + workload('stmv_rf', executables=['pre-process', 'execute-gen'], + input='JCP_benchmarks') + workload('stmv_pme', executables=['pre-process', 'execute-gen'], + input='JCP_benchmarks') + workload('rnase_cubic', executables=['pre-process', 'execute-gen'], + input='JCP_benchmarks') + workload('ion_channel', executables=['pre-process', 'execute-gen'], + input='JCP_benchmarks') + + workload_variable('dlb', default='no', + description='Dynamic load balancing (with DD): auto, no, yes', + workloads=['water_gmx50_adac']) + workload_variable('pin', default='off', + description='Whether mdrun should try to set thread affinities: auto, on, off', + workloads=['water_gmx50_adac']) + workload_variable('nb', default='auto', + description='Calculate non-bonded interactions on: auto, cpu, gpu', + workloads=['water_gmx50_adac']) + workload_variable('pme', default='auto', + description='Perform PME 
calculations on: auto, cpu, gpu', + workloads=['water_gmx50_adac']) + workload_variable('bonded', default='auto', + description='Perform bonded calculations on: auto, cpu, gpu', + workloads=['water_gmx50_adac']) + workload_variable('update', default='auto', + description='Perform update and constraints on: auto, cpu, gpu', + workloads=['water_gmx50_adac']) + workload_variable('maxh', default='0.05', + description='Terminate after 0.99 times this time (hours)', + workloads=['water_gmx50_adac']) + workload_variable('nsteps', default='-1', + description='Run this number of steps (-1 means infinite, -2 means use mdp option, smaller is invalid)', + workloads=['water_gmx50_adac']) + workload_variable('nstlist', default='200', + description='Set nstlist when using a Verlet buffer tolerance (0 is guess)', + workloads=['water_gmx50_adac']) + workload_variable('npme', default='0', + description='Number of separate ranks to be used for PME, -1 is guess', + workloads=['water_gmx50_adac']) + + workload_variable('size', default='1536', + values=['0000.65', '0000.96', '0001.5', + '0003', '0006', '0012', '0024', + '0048', '0096', '0192', '0384', + '0768', '1536', '3072'], + description='Workload size', + workloads=['water_gmx50', 'water_bare', 'water_gmx50_adac']) + workload_variable('type', default='pme', + description='Workload type.', + values=['pme', 'rf'], + workloads=['water_gmx50', 'water_bare', 'water_gmx50_adac']) + workload_variable('input_path', default='{water_gmx50_bare}/{size}', + description='Input path for water GMX50', + workloads=['water_gmx50', 'water_gmx50_adac']) + workload_variable('input_path', default='{water_bare_hbonds}/{size}', + description='Input path for water bare hbonds', + workload='water_bare') + workload_variable('input_path', default='{lignocellulose}/lignocellulose-rf.tpr', + description='Input path for lignocellulose', + workload='lignocellulose') + workload_variable('type', default='Crambin', + description='Workload type. 
Valid values are ''Crambin'', ''Glutamine-Binding-Protein'', ''hEGFRDimer'', ''hEGFRDimerPair'', ''hEGFRDimerSmallerPL'', ''hEGFRtetramerPair''', + workload='hecbiosim') + workload_variable('input_path', default='{HECBioSim}/HECBioSim/{type}/benchmark.tpr', + description='Input path for hecbiosim', + workload='hecbiosim') + workload_variable('input_path', default='{BenchPEP}/benchPEP.tpr', + description='Input path for Bench PEP workload', + workload='benchpep') + workload_variable('input_path', default='{BenchMEM}/benchMEM.tpr', + description='Input path for Bench MEM workload', + workload='benchmem') + workload_variable('input_path', default='{BenchRIB}/benchRIB.tpr', + description='Input path for Bench RIB workload', + workload='benchrib') + workload_variable('input_path', default='{BenchPEP_h}/benchPEP-h.tpr', + description='Input path for Bench PEP-h workload', + workload='benchpep_h') + workload_variable('type', default='rf_nvt', + description='Workload type for JCP_benchmarks', + workload='stmv_rf') + workload_variable('type', default='pme_nvt', + description='Workload type for JCP_benchmarks', + workload='stmv_pme') + workload_variable('type', default='grompp', + description='Workload type for JCP_benchmarks', + workloads=['ion_channel', 'rnase_cubic']) + workload_variable('input_path', default='{JCP_benchmarks}/stmv', + description='Input path for JCP_benchmark {workload_name}', + workloads=['stmv_rf', 'stmv_pme']) + workload_variable('input_path', default='{JCP_benchmarks}/{workload_name}', + description='Input path for JCP_benchmark {workload_name}', + workloads=['ion_channel', 'rnase_cubic']) + + log_str = os.path.join(Expander.expansion_str('experiment_run_dir'), + 'md.log') + + figure_of_merit('Core Time', log_file=log_str, + fom_regex=r'\s+Time:\s+(?P[0-9]+\.[0-9]+).*', + group_name='core_time', units='s') + + figure_of_merit('Wall Time', log_file=log_str, + fom_regex=r'\s+Time:\s+[0-9]+\.[0-9]+\s+' + + r'(?P[0-9]+\.[0-9]+).*', + 
group_name='wall_time', units='s') + + figure_of_merit('Percent Core Time', log_file=log_str, + fom_regex=r'\s+Time:\s+[0-9]+\.[0-9]+\s+[0-9]+\.[0-9]+\s+' + + r'(?P[0-9]+\.[0-9]+).*', + group_name='perc_core_time', units='%') + + figure_of_merit('Nanosecs per day', log_file=log_str, + fom_regex=r'Performance:\s+' + + r'(?P[0-9]+\.[0-9]+).*', + group_name='ns_per_day', units='ns/day') + + figure_of_merit('Hours per nanosec', log_file=log_str, + fom_regex=r'Performance:\s+[0-9]+\.[0-9]+\s+' + + r'(?P[0-9]+\.[0-9]+).*', + group_name='hours_per_ns', units='hours/ns') diff --git a/repo/gromacs/gmxDetectCpu-cmake-3.14.patch b/repo/gromacs/gmxDetectCpu-cmake-3.14.patch new file mode 100644 index 000000000..2f94a6a99 --- /dev/null +++ b/repo/gromacs/gmxDetectCpu-cmake-3.14.patch @@ -0,0 +1,12 @@ +--- a/cmake/gmxDetectCpu.cmake ++++ b/cmake/gmxDetectCpu.cmake +@@ -83,7 +83,7 @@ function(gmx_run_cpu_detection TYPE) + set(GCC_INLINE_ASM_DEFINE "-DGMX_X86_GCC_INLINE_ASM=0") + endif() + +- set(_compile_definitions "${GCC_INLINE_ASM_DEFINE} -I${PROJECT_SOURCE_DIR}/src -DGMX_CPUINFO_STANDALONE ${GMX_STDLIB_CXX_FLAGS} -DGMX_TARGET_X86=${GMX_TARGET_X86_VALUE}") ++ set(_compile_definitions ${GCC_INLINE_ASM_DEFINE} -I${PROJECT_SOURCE_DIR}/src -DGMX_CPUINFO_STANDALONE ${GMX_STDLIB_CXX_FLAGS} -DGMX_TARGET_X86=${GMX_TARGET_X86_VALUE}) + set(LINK_LIBRARIES "${GMX_STDLIB_LIBRARIES}") + try_compile(CPU_DETECTION_COMPILED + "${PROJECT_BINARY_DIR}" + diff --git a/repo/gromacs/gmxDetectSimd-cmake-3.14.patch b/repo/gromacs/gmxDetectSimd-cmake-3.14.patch new file mode 100644 index 000000000..f30ec78a7 --- /dev/null +++ b/repo/gromacs/gmxDetectSimd-cmake-3.14.patch @@ -0,0 +1,11 @@ +--- a/cmake/gmxDetectSimd.cmake ++++ b/cmake/gmxDetectSimd.cmake +@@ -77,7 +77,7 @@ function(gmx_suggest_simd _suggested_simd) + else() + set(GMX_TARGET_X86_VALUE 0) + endif() +- set(_compile_definitions "${GCC_INLINE_ASM_DEFINE} -I${CMAKE_SOURCE_DIR}/src -DGMX_CPUINFO_STANDALONE ${GMX_STDLIB_CXX_FLAGS} 
-DGMX_TARGET_X86=${GMX_TARGET_X86_VALUE}") ++ set(_compile_definitions ${GCC_INLINE_ASM_DEFINE} -I${CMAKE_SOURCE_DIR}/src -DGMX_CPUINFO_STANDALONE ${GMX_STDLIB_CXX_FLAGS} -DGMX_TARGET_X86=${GMX_TARGET_X86_VALUE}) + + # Prepare a default suggestion + set(OUTPUT_SIMD "None") diff --git a/repo/gromacs/package.py b/repo/gromacs/package.py new file mode 100644 index 000000000..99f80170f --- /dev/null +++ b/repo/gromacs/package.py @@ -0,0 +1,708 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. +# +# SPDX-License-Identifier: Apache-2.0 + +import os + +import llnl.util.filesystem as fs + +from spack.package import * + + +class Gromacs(CMakePackage, CudaPackage, ROCmPackage): + """GROMACS is a molecular dynamics package primarily designed for simulations + of proteins, lipids and nucleic acids. It was originally developed in + the Biophysical Chemistry department of University of Groningen, and is now + maintained by contributors in universities and research centers across the world. + + GROMACS is one of the fastest and most popular software packages + available and can run on CPUs as well as GPUs. It is free, open source + released under the GNU Lesser General Public License. Before the version 4.6, + GROMACS was released under the GNU General Public License. 
+ """ + + homepage = "https://www.gromacs.org" + url = "https://ftp.gromacs.org/gromacs/gromacs-2022.2.tar.gz" + list_url = "https://ftp.gromacs.org/gromacs" + git = "https://gitlab.com/gromacs/gromacs.git" + maintainers("danielahlin", "eirrgang", "junghans") + + version("main", branch="main") + version("master", branch="main", deprecated=True) + version("2023.3", sha256="4ec8f8d0c7af76b13f8fd16db8e2c120e749de439ae9554d9f653f812d78d1cb") + version("2023.2", sha256="bce1480727e4b2bb900413b75d99a3266f3507877da4f5b2d491df798f9fcdae") + version("2023.1", sha256="eef2bb4a6cb6314cf9da47f26df2a0d27af4bf7b3099723d43601073ab0a42f4") + version("2023", sha256="ac92c6da72fbbcca414fd8a8d979e56ecf17c4c1cdabed2da5cfb4e7277b7ba8") + version("2022.6", sha256="75d277138475679dd3e334e384a71516570cde767310476687f2a5b72333ea41") + version("2022.5", sha256="083cc3c424bb93ffe86c12f952e3e5b4e6c9f6520de5338761f24b75e018c223") + version("2022.4", sha256="c511be602ff29402065b50906841def98752639b92a95f1b0a1060d9b5e27297") + version("2022.3", sha256="14cfb130ddaf8f759a3af643c04f5a0d0d32b09bc3448b16afa5b617f5e35dae") + version("2022.2", sha256="656404f884d2fa2244c97d2a5b92af148d0dbea94ad13004724b3fcbf45e01bf") + version("2022.1", sha256="85ddab5197d79524a702c4959c2c43be875e0fc471df3a35224939dce8512450") + version("2022", sha256="fad60d606c02e6164018692c6c9f2c159a9130c2bf32e8c5f4f1b6ba2dda2b68") + version("2021.7", sha256="4db7bbbfe5424de48373686ec0e8c5bfa7175d5cd74290ef1c1e840e6df67f06") + version("2021.6", sha256="52df2c1d7586fd028d9397985c68bd6dd26e6e905ead382b7e6c473d087902c3") + version("2021.5", sha256="eba63fe6106812f72711ef7f76447b12dd1ee6c81b3d8d4d0e3098cd9ea009b6") + version("2021.4", sha256="cb708a3e3e83abef5ba475fdb62ef8d42ce8868d68f52dafdb6702dc9742ba1d") + version("2021.3", sha256="e109856ec444768dfbde41f3059e3123abdb8fe56ca33b1a83f31ed4575a1cc6") + version("2021.2", sha256="d940d865ea91e78318043e71f229ce80d32b0dc578d64ee5aa2b1a4be801aadb") + version("2021.1", 
sha256="bc1d0a75c134e1fb003202262fe10d3d32c59bbb40d714bc3e5015c71effe1e5") + version("2021", sha256="efa78ab8409b0f5bf0fbca174fb8fbcf012815326b5c71a9d7c385cde9a8f87b") + version("2020.7", sha256="744158d8f61b0d36ffe89ec934519b7e0981a7af438897740160da648d36c2f0") + version("2020.6", sha256="d8bbe57ed3c9925a8cb99ecfe39e217f930bed47d5268a9e42b33da544bdb2ee") + version("2020.5", sha256="7b6aff647f7c8ee1bf12204d02cef7c55f44402a73195bd5f42cf11850616478") + version("2020.4", sha256="5519690321b5500c7951aaf53ff624042c3edd1a5f5d6dd1f2d802a3ecdbf4e6") + version("2020.3", sha256="903183691132db14e55b011305db4b6f4901cc4912d2c56c131edfef18cc92a9") + version("2020.2", sha256="7465e4cd616359d84489d919ec9e4b1aaf51f0a4296e693c249e83411b7bd2f3") + version("2020.1", sha256="e1666558831a3951c02b81000842223698016922806a8ce152e8f616e29899cf") + version("2020", sha256="477e56142b3dcd9cb61b8f67b24a55760b04d1655e8684f979a75a5eec40ba01") + version("2019.6", sha256="bebe396dc0db11a9d4cc205abc13b50d88225617642508168a2195324f06a358") + version("2019.5", sha256="438061a4a2d45bbb5cf5c3aadd6c6df32d2d77ce8c715f1c8ffe56156994083a") + version("2019.4", sha256="ba4366eedfc8a1dbf6bddcef190be8cd75de53691133f305a7f9c296e5ca1867") + version("2019.3", sha256="4211a598bf3b7aca2b14ad991448947da9032566f13239b1a05a2d4824357573") + version("2019.2", sha256="bcbf5cc071926bc67baa5be6fb04f0986a2b107e1573e15fadcb7d7fc4fb9f7e") + version("2019.1", sha256="b2c37ed2fcd0e64c4efcabdc8ee581143986527192e6e647a197c76d9c4583ec") + version("2019", sha256="c5b281a5f0b5b4eeb1f4c7d4dc72f96985b566561ca28acc9c7c16f6ee110d0b") + version("2018.8", sha256="776923415df4bc78869d7f387c834141fdcda930b2e75be979dc59ecfa6ebecf") + version("2018.5", sha256="32261df6f7ec4149fc0508f9af416953d056e281590359838c1ed6644ba097b8") + version("2018.4", sha256="6f2ee458c730994a8549d6b4f601ecfc9432731462f8bd4ffa35d330d9aaa891") + version("2018.3", sha256="4423a49224972969c52af7b1f151579cea6ab52148d8d7cbae28c183520aa291") + version("2018.2", 
sha256="4bdde8120c510b6543afb4b18f82551fddb11851f7edbd814aa24022c5d37857") + version("2018.1", sha256="4d3533340499323fece83b4a2d4251fa856376f2426c541e00b8e6b4c0d705cd") + version("2018", sha256="deb5d0b749a52a0c6083367b5f50a99e08003208d81954fb49e7009e1b1fd0e9") + version("2016.6", sha256="bac0117d2cad21f9b94fe5b854fb9ae7435b098a6da4e732ee745f18e52473d7") + version("2016.5", sha256="57db26c6d9af84710a1e0c47a1f5bf63a22641456448dcd2eeb556ebd14e0b7c") + version("2016.4", sha256="4be9d3bfda0bdf3b5c53041e0b8344f7d22b75128759d9bfa9442fe65c289264") + version("2016.3", sha256="7bf00e74a9d38b7cef9356141d20e4ba9387289cbbfd4d11be479ef932d77d27") + version("5.1.5", sha256="c25266abf07690ecad16ed3996899b1d489cbb1ef733a1befb3b5c75c91a703e") + version("5.1.4", sha256="0f3793d8f1f0be747cf9ebb0b588fb2b2b5dc5acc32c3046a7bee2d2c03437bc") + version("5.1.2", sha256="39d6f1d7ae8ba38cea6089da40676bfa4049a49903d21551abc030992a58f304") + version("4.6.7", sha256="6afb1837e363192043de34b188ca3cf83db6bd189601f2001a1fc5b0b2a214d9") + version("4.5.5", sha256="e0605e4810b0d552a8761fef5540c545beeaf85893f4a6e21df9905a33f871ba") + + variant( + "mpi", default=True, description="Activate MPI support (disable for Thread-MPI support)" + ) + variant("shared", default=True, description="Enables the build of shared libraries") + variant( + "double", + default=False, + description="Produces a double precision version of the executables", + ) + variant( + "cufftmp", + default=False, + when="@2022: +cuda+mpi", + description="Enable multi-GPU FFT support with cuFFTMp", + ) + variant( + "heffte", + default=False, + when="@2021: +sycl+mpi", + description="Enable multi-GPU FFT support with HeFFTe", + ) + variant("opencl", default=False, description="Enable OpenCL support") + variant("sycl", default=False, when="@2021:", description="Enable SYCL support") + variant( + "intel-data-center-gpu-max", + default=False, + when="@2022: +sycl", + description="Enable support for Intel Data Center GPU Max", + ) + 
variant("nosuffix", default=False, description="Disable default suffixes") + variant( + "build_type", + default="Release", + description="The build type to build", + values=( + "Debug", + "Release", + "RelWithDebInfo", + "MinSizeRel", + "Reference", + "RelWithAssert", + "Profile", + ), + ) + variant( + "nblib", + default=True, + when="@2021:", + description="Build and install the NB-LIB C++ API for GROMACS", + ) + variant( + "gmxapi", + default=True, + when="@2019:", + description="Build and install the gmxlib python API for GROMACS", + ) + variant( + "mdrun_only", + default=False, + description="Enables the build of a cut-down version" + " of libgromacs and/or the mdrun program", + ) + conflicts( + "+mdrun_only", when="@2021:", msg="mdrun-only build option was removed for GROMACS 2021." + ) + variant("openmp", default=True, description="Enables OpenMP at configure time") + variant("openmp_max_threads", default="none", description="Max number of OpenMP threads") + conflicts( + "+openmp_max_threads", when="~openmp", msg="OpenMP is off but OpenMP Max threads is set" + ) + variant( + "sve", + default=True, + description="Enable SVE on aarch64 if available", + when="target=neoverse_v1", + ) + variant( + "sve", default=True, description="Enable SVE on aarch64 if available", when="target=a64fx" + ) + variant( + "relaxed_double_precision", + default=False, + description="GMX_RELAXED_DOUBLE_PRECISION, use only for Fujitsu PRIMEHPC", + ) + conflicts( + "+relaxed_double_precision", + when="@2021:", + msg="GMX_RELAXED_DOUBLE_PRECISION option removed for GROMACS 2021.", + ) + variant("hwloc", default=True, description="Use the hwloc portable hardware locality library") + variant("cycle_subcounters", default=False, description="Enables cycle subcounters") + + variant("cp2k", default=False, description="CP2K QM/MM interface integration") + conflicts( + "+cp2k", when="@:2021", msg="CP2K QM/MM support have been introduced in GROMACS 2022" + ) + conflicts("+shared", when="+cp2k", 
msg="Enabling CP2K requires static build") + conflicts("%intel", when="@2022:", msg="GROMACS %intel support was removed in version 2022") + conflicts("%gcc@:8", when="@2023:", msg="GROMACS requires GCC 9 or later since version 2023") + conflicts( + "^intel-oneapi-mkl@:2021.2", + when="@2023:", + msg="GROMACS requires oneMKL 2021.3 or later since version 2023", + ) + + depends_on("mpi", when="+mpi") + + # Plumed 2.9.0 needs Gromacs 2023, 2022.5, 2021.7, 2020.7 + # Plumed 2.8.3 needs Gromacs 2022.5, 2021.7, 2020.7, 2019.6 + # Plumed 2.8.2 needs Gromacs 2022.5, 2021.7, 2020.7, 2019.6 + # Plumed 2.8.1 needs Gromacs 2022.3, 2021.6, 2020.7, 2019.6 + # Plumed 2.8.0 needs Gromacs 2021.4, 2020.6, 2019.6 + # Plumed 2.7.6 needs Gromacs 2021.5, 2020.6, 2019.6 + # Plumed 2.7.5 needs Gromacs 2021.5, 2020.6, 2019.6 + # Plumed 2.7.4 needs Gromacs 2021.4, 2020.6, 2019.6 + # Plumed 2.7.3 needs Gromacs 2021.4, 2020.6, 2019.6 + # Plumed 2.7.2 needs Gromacs 2021, 2020.6, 2019.6 + # Plumed 2.7.1 needs Gromacs 2021, 2020.5, 2019.6 + # Plumed 2.7.0 needs Gromacs 2020.4, 2019.6 + # Plumed 2.6.6 needs Gromacs 2020.4, 2019.6, 2018.8 + # Plumed 2.6.5 needs Gromacs 2020.4, 2019.6, 2018.8 + # Plumed 2.6.4 needs Gromacs 2020.4, 2019.6, 2018.8 + # Plumed 2.6.3 needs Gromacs 2020.4, 2019.6, 2018.8 + # Plumed 2.6.2 needs Gromacs 2020.4, 2019.6, 2018.8 + # Plumed 2.6.1 needs Gromacs 2020.2, 2019.6, 2018.8 + # Plumed 2.6.0 needs Gromacs 2019.4, 2018.8 + # Plumed 2.5.7 needs Gromacs 2019.4, 2018.8, 2016.6 + # Plumed 2.5.6 needs Gromacs 2019.4, 2018.8, 2016.6 + # Plumed 2.5.5 needs Gromacs 2019.4, 2018.8, 2016.6 + # Plumed 2.5.4 needs Gromacs 2019.4, 2018.8, 2016.6 + # Plumed 2.5.3 needs Gromacs 2019.4, 2018.8, 2016.6 + # Plumed 2.5.2 needs Gromacs 2019.2, 2018.6, 2016.6 + # Plumed 2.5.1 needs Gromacs 2018.6, 2016.6 + # Plumed 2.5.0 needs Gromacs 2018.4, 2016.5 + + # Above dependencies can be verified, and new versions added, by going to + # https://github.com/plumed/plumed2/tree/v2.9.0/patches + # and 
switching tags. + plumed_patches = { + "2023": "2.9.0", + "2022.5": "2.8.2:2.9.0", + "2022.3": "2.8.1", + "2021.7": "2.8.2:2.9.0", + "2021.6": "2.8.1", + "2021.5": "2.7.5:2.7.6", + "2021.4": "2.7.3:2.8.0", + "2021": "2.7.1:2.7.2", + "2020.7": "2.8.1:2.9.0", + "2020.6": "2.7.2:2.8.0", + "2020.5": "2.7.1", + "2020.4": "2.6.2:2.7.0", + "2020.2": "2.6.1", + "2019.6": "2.6.1:2.8.3", + "2019.4": "2.5.3:2.6.0", + "2019.2": "2.5.2", + "2018.8": "2.5.3:2.6", + "2018.6": "2.5.1:2.5.2", + "2018.4": "2.5.0", + "2016.6": "2.5.1:2.5", + "2016.5": "2.5.0", + } + + variant( + "plumed", + default=False, + description="Enable PLUMED support", + when="@{0}".format(",".join(plumed_patches.keys())), + ) + with when("+plumed"): + depends_on("plumed+mpi", when="+mpi") + depends_on("plumed~mpi", when="~mpi") + for gmx_ver, plumed_vers in plumed_patches.items(): + depends_on("plumed@{0}".format(plumed_vers), when="@{0}+plumed".format(gmx_ver)) + + variant( + "intel_provided_gcc", + default=False, + description="Use this if Intel compiler is installed through spack." 
+ + "The g++ location is written to icp{c,x}.cfg", + ) + + depends_on("fftw-api@3") + depends_on("cmake@2.8.8:3", type="build") + depends_on("cmake@3.4.3:3", type="build", when="@2018:") + depends_on("cmake@3.9.6:3", type="build", when="@2020") + depends_on("cmake@3.13.0:3", type="build", when="@2021") + depends_on("cmake@3.16.3:3", type="build", when="@2022:") + depends_on("cmake@3.18.4:3", type="build", when="@main") + depends_on("cmake@3.16.0:3", type="build", when="%fj") + depends_on("cuda", when="+cuda") + depends_on("sycl", when="+sycl") + depends_on("lapack") + depends_on("blas") + depends_on("hipsycl", when="+rocm") + depends_on("gcc", when="%oneapi ~intel_provided_gcc") + depends_on("gcc", when="%intel ~intel_provided_gcc") + + depends_on("hwloc@1.0:1", when="+hwloc@2016:2018") + depends_on("hwloc", when="+hwloc@2019:") + + depends_on("cp2k@8.1:", when="+cp2k") + + depends_on("nvhpc", when="+cufftmp") + depends_on("heffte", when="+heffte") + + conflicts("^hipsycl~rocm", when="+rocm") + + requires( + "%intel", + "%oneapi", + policy="one_of", + when="+intel_provided_gcc", + msg="Only attempt to find gcc libs for Intel compiler if Intel compiler is used.", + ) + + # If the Intel suite is used for Lapack, it must be used for fftw and vice-versa + for _intel_pkg in INTEL_MATH_LIBRARIES: + requires(f"^[virtuals=fftw-api] {_intel_pkg}", when=f"^[virtuals=lapack] {_intel_pkg}") + requires(f"^[virtuals=lapack] {_intel_pkg}", when=f"^[virtuals=fftw-api] {_intel_pkg}") + + patch("gmxDetectCpu-cmake-3.14.patch", when="@2018:2019.3^cmake@3.14.0:") + patch("gmxDetectSimd-cmake-3.14.patch", when="@5.0:2017^cmake@3.14.0:") + # 2021.2 will always try to build tests (see https://gromacs.bioexcel.eu/t/compilation-failure-for-gromacs-2021-1-and-2021-2-with-cmake-3-20-2/2129) + patch( + "https://gitlab.com/gromacs/gromacs/-/commit/10262892e11a87fda0f59e633c89ed5ab1100509.diff", + sha256="2c30d00404b76421c13866cc42afa5e63276f7926c862838751b158df8727b1b", + 
when="@2021.1:2021.2", + ) + + filter_compiler_wrappers( + "*.cmake", relative_root=os.path.join("share", "cmake", "gromacs_mpi") + ) + filter_compiler_wrappers("*.cmake", relative_root=os.path.join("share", "cmake", "gromacs")) + + def patch(self): + # Otherwise build fails with GCC 11 (11.2) + if self.spec.satisfies("@2018:2020.6"): + filter_file( + "#include ", + "#include \n#include ", + "src/gromacs/awh/biasparams.h", + ) + if self.spec.satisfies("@2018:2018.8"): + filter_file( + "#include ", + "#include \n#include ", + "src/gromacs/mdlib/minimize.cpp", + ) + if self.spec.satisfies("@2019:2019.6,2020:2020.6"): + filter_file( + "#include ", + "#include \n#include ", + "src/gromacs/mdrun/minimize.cpp", + ) + if self.spec.satisfies("@2020:2020.6"): + filter_file( + "#include ", + "#include \n#include ", + "src/gromacs/modularsimulator/modularsimulator.h", + ) + # Ref: https://gitlab.com/gromacs/gromacs/-/merge_requests/3504 + if self.spec.satisfies("@2023"): + filter_file( + " if (std::filesystem::equivalent(searchPath, buildBinPath))", + " if (std::error_code c; std::filesystem::equivalent(searchPath," + " buildBinPath, c))", + "src/gromacs/commandline/cmdlineprogramcontext.cpp", + string=True, + ) + + if "+plumed" in self.spec: + self.spec["plumed"].package.apply_patch(self) + + if self.spec.satisfies("%nvhpc"): + # Disable obsolete workaround + filter_file("ifdef __PGI", "if 0", "src/gromacs/fileio/xdrf.h") + + if "+cuda" in self.spec: + # Upstream supports building of last two major versions of Gromacs. + # Older versions of Gromacs need to be patched to build with more recent + # versions of CUDA library. 
+ + # Hardware version 3.0 is supported up to CUDA 10.2 (Gromacs 4.6-2020.3 + # needs to be patched, 2020.4 is handling it correctly) + + if self.spec.satisfies("@4.6:2020.3^cuda@11:"): + filter_file( + r"-gencode;arch=compute_30,code=sm_30;?", "", "cmake/gmxManageNvccConfig.cmake" + ) + filter_file( + r"-gencode;arch=compute_30,code=compute_30;?", + "", + "cmake/gmxManageNvccConfig.cmake", + ) + + # Hardware version 2.0 is supported up to CUDA 8 (Gromacs 4.6-2016.3 + # needs to be patched, 2016.4 is handling it correctly, removed in 2019) + + if self.spec.satisfies("@4.6:2016.3^cuda@9:"): + filter_file( + r"-gencode;arch=compute_20,code=sm_20;?", "", "cmake/gmxManageNvccConfig.cmake" + ) + filter_file( + r"-gencode;arch=compute_20,code=compute_20;?", + "", + "cmake/gmxManageNvccConfig.cmake", + ) + + if self.spec.satisfies("@4.6:5.0^cuda@9:"): + filter_file( + r"-gencode;arch=compute_20,code=sm_21;?", "", "cmake/gmxManageNvccConfig.cmake" + ) + + +class CMakeBuilder(spack.build_systems.cmake.CMakeBuilder): + @run_after("build") + def build_test_binaries(self): + """Build the test binaries. + + GROMACS usually excludes tests from the default build target, but building + the tests during spack's ``check`` phase takes a long time while producing + no visible output, even with ``--verbose``. + + Here, we make sure the test binaries are built during the build phase + (as would normally be expected when configured with BUILD_TESTING) + when the ``--test`` flag is used. + + Note: the GMX_DEVELOPER_BUILD option disables the EXCLUDE_FROM_ALL on the + test binaries, but the option incurs additional side effects that may + not be intended with ``--test``. + """ + if self.pkg.run_tests: + with fs.working_dir(self.build_directory): + make("tests") + + def check(self): + """Run the ``check`` target (skipping the ``test`` target). + + Override the standard CMakeBuilder behavior. GROMACS has both `test` + and `check` targets, but we are only interested in the latter. 
+ """ + with fs.working_dir(self.build_directory): + if self.generator == "Unix Makefiles": + make("check") + elif self.generator == "Ninja": + ninja("check") + + def cmake_args(self): + options = [] + # Warning: Use `define_from_variant()` with caution. + # GROMACS may use unexpected conventions for CMake variable values. + # For example: variables that accept boolean values like "OFF" + # may actually be STRING type, and undefined variables may trigger + # different defaults for dependent options than explicitly defined variables. + # `-DGMX_VAR=OFF` may not have the same meaning as `-DGMX_VAR=`. + # In other words, the mapping between package variants and the + # GMX CMake variables is often non-trivial. + + if "+mpi" in self.spec: + options.append("-DGMX_MPI:BOOL=ON") + if self.pkg.version < Version("2020"): + # Ensures gmxapi builds properly + options.extend( + [ + "-DCMAKE_C_COMPILER=%s" % self.spec["mpi"].mpicc, + "-DCMAKE_CXX_COMPILER=%s" % self.spec["mpi"].mpicxx, + "-DCMAKE_Fortran_COMPILER=%s" % self.spec["mpi"].mpifc, + ] + ) + elif self.pkg.version == Version("2021"): + # Work around https://gitlab.com/gromacs/gromacs/-/issues/3896 + # Ensures gmxapi builds properly + options.extend( + [ + "-DCMAKE_C_COMPILER=%s" % self.spec["mpi"].mpicc, + "-DCMAKE_CXX_COMPILER=%s" % self.spec["mpi"].mpicxx, + ] + ) + else: + options.extend( + [ + "-DCMAKE_C_COMPILER=%s" % spack_cc, + "-DCMAKE_CXX_COMPILER=%s" % spack_cxx, + "-DMPI_C_COMPILER=%s" % self.spec["mpi"].mpicc, + "-DMPI_CXX_COMPILER=%s" % self.spec["mpi"].mpicxx, + ] + ) + else: + options.extend( + [ + "-DCMAKE_C_COMPILER=%s" % spack_cc, + "-DCMAKE_CXX_COMPILER=%s" % spack_cxx, + "-DGMX_MPI:BOOL=OFF", + "-DGMX_THREAD_MPI:BOOL=ON", + ] + ) + + if self.spec.satisfies("%aocc"): + options.append("-DCMAKE_CXX_FLAGS=--stdlib=libc++") + + if self.spec.satisfies("@2020:"): + options.append("-DGMX_INSTALL_LEGACY_API=ON") + + if self.spec.satisfies("%oneapi") or self.spec.satisfies("%intel"): + # If 
intel-oneapi-compilers was installed through spack the gcc is added to the + # configuration file. + if self.spec.satisfies("+intel_provided_gcc") and os.path.exists( + ".".join([os.environ["SPACK_CXX"], "cfg"]) + ): + with open(".".join([os.environ["SPACK_CXX"], "cfg"]), "r") as f: + options.append("-DCMAKE_CXX_FLAGS={}".format(f.read())) + else: + options.append("-DGMX_GPLUSPLUS_PATH=%s/g++" % self.spec["gcc"].prefix.bin) + + if "+double" in self.spec: + options.append("-DGMX_DOUBLE:BOOL=ON") + + if "+nosuffix" in self.spec: + options.append("-DGMX_DEFAULT_SUFFIX:BOOL=OFF") + + if "~shared" in self.spec: + options.append("-DBUILD_SHARED_LIBS:BOOL=OFF") + options.append("-DGMXAPI:BOOL=OFF") + + if "+hwloc" in self.spec: + options.append("-DGMX_HWLOC:BOOL=ON") + else: + options.append("-DGMX_HWLOC:BOOL=OFF") + + if self.pkg.version >= Version("2021"): + if "+cuda" in self.spec: + options.append("-DGMX_GPU:STRING=CUDA") + elif "+opencl" in self.spec: + options.append("-DGMX_GPU:STRING=OpenCL") + elif "+rocm" in self.spec: + archs = self.spec.variants["amdgpu_target"].value + arch_str = ",".join(archs) + options.extend( + [ + "-DGMX_GPU:STRING=SYCL", + "-DGMX_SYCL_HIPSYCL=ON", + f"-DHIPSYCL_TARGETS=hip:{arch_str}", + ] + ) + elif "+sycl" in self.spec: + options.append("-DGMX_GPU:STRING=SYCL") + else: + options.append("-DGMX_GPU:STRING=OFF") + else: + if "+cuda" in self.spec or "+opencl" in self.spec: + options.append("-DGMX_GPU:BOOL=ON") + if "+opencl" in self.spec: + options.append("-DGMX_USE_OPENCL=ON") + else: + options.append("-DGMX_GPU:BOOL=OFF") + + if "+cuda" in self.spec: + options.append("-DCUDA_TOOLKIT_ROOT_DIR:STRING=" + self.spec["cuda"].prefix) + + target = self.spec.target + if "+cuda" in self.spec and target.family == "ppc64le": + options.append("-DGMX_EXTERNAL_LAPACK:BOOL=OFF") + else: + options.append("-DGMX_EXTERNAL_LAPACK:BOOL=ON") + if self.spec["lapack"].libs: + options.append("-DGMX_LAPACK_USER={0}".format(self.spec["lapack"].libs.joined(";"))) 
+ + if "+cuda" in self.spec and target.family == "ppc64le": + options.append("-DGMX_EXTERNAL_BLAS:BOOL=OFF") + else: + options.append("-DGMX_EXTERNAL_BLAS:BOOL=ON") + if self.spec["blas"].libs: + options.append("-DGMX_BLAS_USER={0}".format(self.spec["blas"].libs.joined(";"))) + + if "+cp2k" in self.spec: + options.append("-DGMX_CP2K:BOOL=ON") + options.append("-DCP2K_DIR:STRING={0}".format(self.spec["cp2k"].prefix)) + + if "+cufftmp" in self.spec: + options.append("-DGMX_USE_CUFFTMP=ON") + options.append( + f'-DcuFFTMp_ROOT={self.spec["nvhpc"].prefix}/Linux_{self.spec.target.family}' + + f'/{self.spec["nvhpc"].version}/math_libs' + ) + + if "+heffte" in self.spec: + options.append("-DGMX_USE_HEFFTE=on") + options.append(f'-DHeffte_ROOT={self.spec["heffte"].prefix}') + + if "+intel-data-center-gpu-max" in self.spec: + options.append("-DGMX_GPU_NB_CLUSTER_SIZE=8") + options.append("-DGMX_GPU_NB_NUM_CLUSTER_PER_CELL_X=1") + + if "~nblib" in self.spec: + options.append("-DGMX_INSTALL_NBLIB_API=OFF") + if "~gmxapi" in self.spec: + options.append("-DGMXAPI=OFF") + + # Activate SIMD based on properties of the target + target = self.spec.target + if target >= "zen4": + # AMD Family 17h (EPYC Genoa) + options.append("-DGMX_SIMD=AVX_512") + elif target >= "zen2": + # AMD Family 17h (EPYC Rome) + options.append("-DGMX_SIMD=AVX2_256") + elif target >= "zen": + # AMD Family 17h (EPYC Naples) + options.append("-DGMX_SIMD=AVX2_128") + elif target >= "bulldozer": + # AMD Family 15h + options.append("-DGMX_SIMD=AVX_128_FMA") + elif target.family == "ppc64le": + options.append("-DGMX_SIMD=None") + elif "vsx" in target: + # IBM Power 7 and beyond + if self.spec.satisfies("%nvhpc"): + options.append("-DGMX_SIMD=None") + else: + options.append("-DGMX_SIMD=IBM_VSX") + elif target.family == "aarch64": + # ARMv8 + if self.spec.satisfies("%nvhpc"): + options.append("-DGMX_SIMD=None") + elif "sve" in target.features and "+sve" in self.spec: + options.append("-DGMX_SIMD=ARM_SVE") + else: + 
options.append("-DGMX_SIMD=ARM_NEON_ASIMD") + elif target == "mic_knl": + # Intel KNL + options.append("-DGMX_SIMD=AVX_512_KNL") + else: + # Other architectures + simd_features = [ + ("sse2", "SSE2"), + ("sse4_1", "SSE4.1"), + ("avx", "AVX_256"), + ("axv128", "AVX2_128"), + ("avx2", "AVX2_256"), + ("avx512", "AVX_512"), + ] + + # Workaround NVIDIA compiler bug when avx512 is enabled + if self.spec.satisfies("%nvhpc") and ("avx512", "AVX_512") in simd_features: + simd_features.remove(("avx512", "AVX_512")) + + feature_set = False + for feature, flag in reversed(simd_features): + if feature in target: + options.append("-DGMX_SIMD:STRING={0}".format(flag)) + feature_set = True + break + + # Fall back + if not feature_set: + options.append("-DGMX_SIMD:STRING=None") + + # Use the 'rtdscp' assembly instruction only on + # appropriate architectures + options.append(self.define("GMX_USE_RDTSCP", str(target.family) in ("x86_64", "x86"))) + + if self.spec.satisfies("@:2020"): + options.append(self.define_from_variant("GMX_BUILD_MDRUN_ONLY", "mdrun_only")) + + options.append(self.define_from_variant("GMX_OPENMP", "openmp")) + + if self.spec.satisfies("@:2020"): + options.append( + self.define_from_variant( + "GMX_RELAXED_DOUBLE_PRECISION", "relaxed_double_precision" + ) + ) + + if "+cycle_subcounters" in self.spec: + options.append("-DGMX_CYCLE_SUBCOUNTERS:BOOL=ON") + else: + options.append("-DGMX_CYCLE_SUBCOUNTERS:BOOL=OFF") + + if "+openmp" in self.spec and self.spec.variants["openmp_max_threads"].value != "none": + options.append( + "-DGMX_OPENMP_MAX_THREADS=%s" % self.spec.variants["openmp_max_threads"].value + ) + + if self.spec["lapack"].name in INTEL_MATH_LIBRARIES: + # fftw-api@3 is provided by intel-mkl or intel-parllel-studio + # we use the mkl interface of gromacs + options.append("-DGMX_FFT_LIBRARY=mkl") + if self.spec.satisfies("@:2022"): + options.append( + "-DMKL_INCLUDE_DIR={0}".format(self.spec["mkl"].headers.directories[0]) + ) + # The 'blas' property 
provides a minimal set of libraries + # that is sufficient for fft. Using full mkl fails the cmake test + options.append("-DMKL_LIBRARIES={0}".format(self.spec["blas"].libs.joined(";"))) + else: + # we rely on the fftw-api@3 + options.append("-DGMX_FFT_LIBRARY=fftw3") + if "^amdfftw" in self.spec: + options.append("-DGMX_FFT_LIBRARY=fftw3") + options.append( + "-DFFTWF_INCLUDE_DIRS={0}".format(self.spec["amdfftw"].headers.directories[0]) + ) + options.append( + "-DFFTWF_LIBRARIES={0}".format(self.spec["amdfftw"].libs.joined(";")) + ) + elif "^armpl-gcc" in self.spec: + options.append( + "-DFFTWF_INCLUDE_DIR={0}".format(self.spec["armpl-gcc"].headers.directories[0]) + ) + options.append( + "-DFFTWF_LIBRARY={0}".format(self.spec["armpl-gcc"].libs.joined(";")) + ) + elif "^acfl" in self.spec: + options.append( + "-DFFTWF_INCLUDE_DIR={0}".format(self.spec["acfl"].headers.directories[0]) + ) + options.append("-DFFTWF_LIBRARY={0}".format(self.spec["acfl"].libs.joined(";"))) + + # Ensure that the GROMACS log files report how the code was patched + # during the build, so that any problems are easier to diagnose. + if "+plumed" in self.spec: + options.append("-DGMX_VERSION_STRING_OF_FORK=PLUMED-spack") + else: + options.append("-DGMX_VERSION_STRING_OF_FORK=spack") + return options diff --git a/repo/hipsycl/package.py b/repo/hipsycl/package.py new file mode 100644 index 000000000..2de965139 --- /dev/null +++ b/repo/hipsycl/package.py @@ -0,0 +1,171 @@ +# Copyright 2023 Lawrence Livermore National Security, LLC and other +# Benchpark Project Developers. See the top-level COPYRIGHT file for details. 
+# +# SPDX-License-Identifier: Apache-2.0 + +import json +from os import path + +from llnl.util import filesystem + +from spack.package import * + + +class Hipsycl(CMakePackage, ROCmPackage): + """hipSYCL is an implementation of the SYCL standard programming model + over NVIDIA CUDA/AMD HIP""" + + homepage = "https://github.com/illuhad/hipSYCL" + url = "https://github.com/illuhad/hipSYCL/archive/v0.8.0.tar.gz" + git = "https://github.com/illuhad/hipSYCL.git" + + maintainers("nazavode") + + provides("sycl") + + version("stable", branch="stable", submodules=True) + version( + "23.10.0", + commit="3952b468c9da89edad9dff953cdcab0a3c3bf78c", + submodules=True, + get_full_repo=True, + ) + version("0.9.4", commit="99d9e24d462b35e815e0e59c1b611936c70464ae", submodules=True) + version("0.9.3", commit="51507bad524c33afe8b124804091b10fa25618dc", submodules=True) + version("0.9.2", commit="49fd02499841ae884c61c738610e58c27ab51fdb", submodules=True) + version("0.9.1", commit="fe8465cd5399a932f7221343c07c9942b0fe644c", submodules=True) + version("0.8.0", commit="2daf8407e49dd32ebd1c266e8e944e390d28b22a", submodules=True) + version("develop", branch="develop", submodules=True) + + variant("cuda", default=False, description="Enable CUDA backend for SYCL kernels") + + depends_on("cmake@3.5:", type="build") + depends_on("boost +filesystem", when="@:0.8") + depends_on("boost@1.67.0:1.69.0 +filesystem +fiber +context cxxstd=17", when="@0.9.1:") + depends_on("python@3:") + depends_on("libllvm@8:", when="~cuda") + depends_on("libllvm@9:", when="+cuda") + depends_on("llvm+clang", when="^llvm") + # hipSYCL 0.8.0 supported only LLVM 8-10: + # (https://github.com/AdaptiveCpp/AdaptiveCpp/blob/v0.8.0/CMakeLists.txt#L29-L37) + depends_on("libllvm@8:10", when="@0.8.0") + # https://github.com/OpenSYCL/OpenSYCL/pull/918 was introduced after 0.9.4 + conflicts("^libllvm@16:", when="@:0.9.4") + # LLVM PTX backend requires cuda7:10.1 (https://tinyurl.com/v82k5qq) + depends_on("cuda@9:10.1", 
when="@0.8.1: +cuda ^llvm@9") + depends_on("cuda@9:", when="@0.8.1: +cuda ^llvm@10:") + # hipSYCL@:0.8.0 requires cuda@9:10.0 due to a known bug + depends_on("cuda@9:10.0", when="@:0.8.0 +cuda") + + conflicts( + "%gcc@:4", + when="@:0.9.0", + msg="hipSYCL needs proper C++14 support to be built, %gcc is too old", + ) + conflicts( + "%gcc@:8", + when="@0.9.1:", + msg="hipSYCL needs proper C++17 support to be built, %gcc is too old", + ) + conflicts( + "^llvm build_type=Debug", + when="+cuda", + msg="LLVM debug builds don't work with hipSYCL CUDA backend; for " + "further info please refer to: " + "https://github.com/illuhad/hipSYCL/blob/master/doc/install-cuda.md", + ) + + def cmake_args(self): + spec = self.spec + args = [ + "-DWITH_CPU_BACKEND:Bool=TRUE", + "-DWITH_ROCM_BACKEND:Bool={0}".format("TRUE" if "+rocm" in spec else "FALSE"), + "-DWITH_CUDA_BACKEND:Bool={0}".format("TRUE" if "+cuda" in spec else "FALSE"), + # prevent hipSYCL's cmake to look for other LLVM installations + # if the specified one isn't compatible + "-DDISABLE_LLVM_VERSION_CHECK:Bool=TRUE", + ] + args += [f"-DACPP_VERSION_SUFFIX=spack-{self.version}"] + if "+rocm" in spec: + args += [f"-DHIP_CXX_COMPILER={self.compiler.cxx}"] + # LLVM directory containing all installed CMake files + # (e.g.: configs consumed by client projects) + llvm_cmake_dirs = filesystem.find(spec["libllvm"].prefix, "LLVMExports.cmake") + if len(llvm_cmake_dirs) != 1: + raise InstallError( + "concretized llvm dependency must provide " + "a unique directory containing CMake client " + "files, found: {0}".format(llvm_cmake_dirs) + ) + args.append("-DLLVM_DIR:String={0}".format(path.dirname(llvm_cmake_dirs[0]))) + # clang internal headers directory + llvm_clang_include_dirs = filesystem.find( + spec["libllvm"].prefix, "__clang_cuda_runtime_wrapper.h" + ) + if len(llvm_clang_include_dirs) != 1: + raise InstallError( + "concretized llvm dependency must provide a " + "unique directory containing clang internal " + "headers, 
found: {0}".format(llvm_clang_include_dirs) + ) + args.append( + "-DCLANG_INCLUDE_PATH:String={0}".format(path.dirname(llvm_clang_include_dirs[0])) + ) + # target clang++ executable + llvm_clang_bin = path.join(spec["libllvm"].prefix.bin, "clang++") + if not filesystem.is_exe(llvm_clang_bin): + raise InstallError( + "concretized llvm dependency must provide a " + "valid clang++ executable, found invalid: " + "{0}".format(llvm_clang_bin) + ) + args.append("-DCLANG_EXECUTABLE_PATH:String={0}".format(llvm_clang_bin)) + # explicit CUDA toolkit + if "+cuda" in spec: + args.append("-DCUDA_TOOLKIT_ROOT_DIR:String={0}".format(spec["cuda"].prefix)) + return args + + @run_after("install") + def filter_config_file(self): + config_file_paths = filesystem.find(self.prefix, "syclcc.json") + if len(config_file_paths) != 1: + raise InstallError( + "installed hipSYCL must provide a unique compiler driver " + "configuration file, found: {0}".format(config_file_paths) + ) + config_file_path = config_file_paths[0] + with open(config_file_path) as f: + config = json.load(f) + # 1. Fix compiler: use the real one in place of the Spack wrapper + config["default-cpu-cxx"] = self.compiler.cxx + if "+cuda" in self.spec: + # 2. 
Fix stdlib: we need to make sure cuda-enabled binaries find + # the libc++.so and libc++abi.so dyn linked to the sycl + # ptx backend + rpaths = set() + so_paths = filesystem.find_libraries( + "libc++", self.spec["libllvm"].prefix, shared=True, recursive=True + ) + if len(so_paths) != 1: + raise InstallError( + "concretized llvm dependency must provide a " + "unique directory containing libc++.so, " + "found: {0}".format(so_paths) + ) + rpaths.add(path.dirname(so_paths[0])) + so_paths = filesystem.find_libraries( + "libc++abi", self.spec["libllvm"].prefix, shared=True, recursive=True + ) + if len(so_paths) != 1: + raise InstallError( + "concretized llvm dependency must provide a " + "unique directory containing libc++abi, " + "found: {0}".format(so_paths) + ) + rpaths.add(path.dirname(so_paths[0])) + config["default-cuda-link-line"] += " " + " ".join( + "-rpath {0}".format(p) for p in rpaths + ) + # Replace the installed config file + with open(config_file_path, "w") as f: + json.dump(config, f, indent=2)