diff --git a/tests/benchmark-models/test_petab_benchmark.py b/tests/benchmark-models/test_petab_benchmark.py
index 9fa94891e7..f8ff77b2aa 100644
--- a/tests/benchmark-models/test_petab_benchmark.py
+++ b/tests/benchmark-models/test_petab_benchmark.py
@@ -205,9 +205,9 @@ def test_benchmark_gradient(model, scale, sensitivity_method):
     )
     noise_level = 0.1
-    np.random.seed(0)
+    # find a point where the derivative can be computed
     for _ in range(5):
         if scale:
             point = np.asarray(
@@ -244,26 +244,12 @@ def test_benchmark_gradient(model, scale, sensitivity_method):
     )
 
     if debug:
-        df = pd.DataFrame(
-            [
-                {
-                    (
-                        "fd",
-                        r.metadata["size_absolute"],
-                        str(r.method_id),
-                    ): r.value
-                    for c in d.computers
-                    for r in c.results
-                }
-                for d in derivative.directional_derivatives
-            ],
-            index=parameter_ids,
+        write_debug_output(
+            debug_path / f"{model}_scale={scale}.tsv",
+            derivative,
+            expected_derivative,
+            parameter_ids,
         )
-        df[("fd", "full", "")] = derivative.series.values
-        df[("amici", "", "")] = expected_derivative
-
-        file_name = f"{model}_scale={scale}.tsv"
-        df.to_csv(debug_path / file_name, sep="\t")
 
     assert_gradient_check_success(
         derivative,
@@ -272,3 +258,27 @@ def test_benchmark_gradient(model, scale, sensitivity_method):
         rtol=cur_settings.rtol_check,
         atol=cur_settings.atol_check,
     )
+
+
+def write_debug_output(
+    file_name, derivative, expected_derivative, parameter_ids
+):
+    df = pd.DataFrame(
+        [
+            {
+                (
+                    "fd",
+                    r.metadata["size_absolute"],
+                    str(r.method_id),
+                ): r.value
+                for c in d.computers
+                for r in c.results
+            }
+            for d in derivative.directional_derivatives
+        ],
+        index=parameter_ids,
+    )
+    df[("fd", "full", "")] = derivative.series.values
+    df[("amici", "", "")] = expected_derivative
+
+    df.to_csv(file_name, sep="\t")