diff --git a/.github/workflows/test_benchmark_collection_models.yml b/.github/workflows/test_benchmark_collection_models.yml
index 7c7aacb992..7b1568b1fc 100644
--- a/.github/workflows/test_benchmark_collection_models.yml
+++ b/.github/workflows/test_benchmark_collection_models.yml
@@ -61,6 +61,7 @@ jobs:
         run: |
           git clone --depth 1 https://github.com/benchmarking-initiative/Benchmark-Models-PEtab.git \
             && export BENCHMARK_COLLECTION="$(pwd)/Benchmark-Models-PEtab/Benchmark-Models/" \
+            && pip3 install -e $BENCHMARK_COLLECTION/../src/python \
             && AMICI_PARALLEL_COMPILE="" tests/benchmark-models/test_benchmark_collection.sh
 
       # run gradient checks
diff --git a/tests/benchmark-models/test_benchmark_collection.sh b/tests/benchmark-models/test_benchmark_collection.sh
index f8ccd0c1cd..51f6ad06d5 100755
--- a/tests/benchmark-models/test_benchmark_collection.sh
+++ b/tests/benchmark-models/test_benchmark_collection.sh
@@ -87,6 +87,12 @@ script_path=$(cd "$script_path" && pwd)
 
 for model in $models; do
     yaml="${model_dir}"/"${model}"/"${model}".yaml
+
+    # different naming scheme
+    if [[ "$model" == "Bertozzi_PNAS2020" ]]; then
+        yaml="${model_dir}"/"${model}"/problem.yaml
+    fi
+
     amici_model_dir=test_bmc/"${model}"
     mkdir -p "$amici_model_dir"
     cmd_import="amici_import_petab ${yaml} -o ${amici_model_dir} -n ${model} --flatten"
diff --git a/tests/benchmark-models/test_petab_benchmark.py b/tests/benchmark-models/test_petab_benchmark.py
index d0e2a6d46a..82d27f85eb 100644
--- a/tests/benchmark-models/test_petab_benchmark.py
+++ b/tests/benchmark-models/test_petab_benchmark.py
@@ -1,6 +1,5 @@
 """Tests for simulate_petab on PEtab benchmark problems."""
 
-import os
 from pathlib import Path
 
 import amici
@@ -9,39 +8,32 @@
 import petab.v1 as petab
 import pytest
 from amici.petab.petab_import import import_petab_problem
+import benchmark_models_petab
+
 # Absolute and relative tolerances for finite difference gradient checks.
 ATOL: float = 1e-3
 RTOL: float = 1e-2
 
 repo_root = Path(__file__).parent.parent.parent
-benchmark_path = repo_root / "Benchmark-Models-PEtab" / "Benchmark-Models"
-if not benchmark_path.exists():
-    benchmark_path = Path(os.environ["BENCHMARK_COLLECTION"])
 
 # reuse compiled models from test_benchmark_collection.sh
 benchmark_outdir = repo_root / "test_bmc"
-models = [
-    str(petab_path.stem)
-    for petab_path in benchmark_path.glob("*")
-    if petab_path.is_dir()
-    if str(petab_path.stem)
-    not in (
-        # excluded due to excessive runtime
-        "Bachmann_MSB2011",
-        "Chen_MSB2009",
-        "Froehlich_CellSystems2018",
-        "Raimundez_PCB2020",
-        "Lucarelli_CellSystems2018",
-        "Isensee_JCB2018",
-        "Beer_MolBioSystems2014",
-        "Alkan_SciSignal2018",
-        "Lang_PLOSComputBiol2024",
-        # excluded due to excessive numerical failures
-        "Crauste_CellSystems2017",
-        "Fujita_SciSignal2010",
-    )
-]
+models = set(benchmark_models_petab.MODELS) - {
+    # excluded due to excessive runtime
+    "Bachmann_MSB2011",
+    "Chen_MSB2009",
+    "Froehlich_CellSystems2018",
+    "Raimundez_PCB2020",
+    "Lucarelli_CellSystems2018",
+    "Isensee_JCB2018",
+    "Beer_MolBioSystems2014",
+    "Alkan_SciSignal2018",
+    "Lang_PLOSComputBiol2024",
+    # excluded due to excessive numerical failures
+    "Crauste_CellSystems2017",
+    "Fujita_SciSignal2010",
+}
 
 debug = False
 if debug:
@@ -75,9 +67,7 @@ def test_benchmark_gradient(model, scale):
         # only fail on linear scale
         pytest.skip()
 
-    petab_problem = petab.Problem.from_yaml(
-        benchmark_path / model / (model + ".yaml")
-    )
+    petab_problem = benchmark_models_petab.get_problem(model)
     petab.flatten_timepoint_specific_output_overrides(petab_problem)
 
     # Only compute gradient for estimated parameters.
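Note for reviewers: the diff only shows `benchmark_models_petab.MODELS` and `benchmark_models_petab.get_problem()` in passing, so below is a minimal usage sketch of the two entry points the PR switches to. It is not part of the diff; the model ID is purely illustrative, taken from the exclusion list above.

```python
# Minimal sketch of the benchmark_models_petab entry points used above.
# The package is installed editable from the collection checkout, mirroring
# the workflow change:
#   pip3 install -e Benchmark-Models-PEtab/src/python
import benchmark_models_petab
import petab.v1 as petab

# MODELS lists the IDs of all problems shipped with the collection; the test
# module builds its parametrization by subtracting its exclusion set from it.
print(benchmark_models_petab.MODELS)

# get_problem() resolves a problem by ID, so callers no longer hard-code the
# <model>/<model>.yaml layout. (The shell script still builds paths by hand,
# hence its new special case for Bertozzi_PNAS2020's problem.yaml.)
# "Fujita_SciSignal2010" is only an example ID, taken from the diff above.
petab_problem = benchmark_models_petab.get_problem("Fujita_SciSignal2010")

# As in test_petab_benchmark.py, flatten timepoint-specific output overrides
# before importing the problem into AMICI.
petab.flatten_timepoint_specific_output_overrides(petab_problem)
```

Delegating problem lookup to the package is what allows the test module to drop both the `BENCHMARK_COLLECTION` environment-variable fallback and the directory glob that previously built the model list.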