diff --git a/.github/workflows/test_ert.yml b/.github/workflows/test_ert.yml
index f383e54e4b9..25edf1b0a24 100644
--- a/.github/workflows/test_ert.yml
+++ b/.github/workflows/test_ert.yml
@@ -7,6 +7,11 @@ on:
         type: string
       test-type:
         type: string
+
+permissions:
+  contents: write
+  deployments: write
+
 
 jobs:
   tests-ert:
@@ -56,6 +61,19 @@ jobs:
       run: |
         pytest tests --junit-xml=junit.xml -n4 --show-capture=stderr -sv -m "integration_test and not requires_window_manager" --benchmark-disable
 
+    - name: Run benchmark
+      run: |
+        pytest tests/unit_tests/analysis/test_adaptive_localization.py::test_benchmark --benchmark-json output.json
+
+    - name: Store benchmark result
+      uses: benchmark-action/github-action-benchmark@v1
+      with:
+        name: Python Benchmark with pytest-benchmark
+        tool: 'pytest'
+        output-file-path: output.json
+        github-token: ${{ secrets.GITHUB_TOKEN }}
+        auto-push: true
+
     - name: Test for a clean repository
       run: |
         # Run this before the 'Test CLI' entry below, which produces a few files that are accepted for now. Exclude the wheel.
diff --git a/tests/unit_tests/analysis/test_adaptive_localization.py b/tests/unit_tests/analysis/test_adaptive_localization.py
index 62dbcde8151..96adba02b20 100644
--- a/tests/unit_tests/analysis/test_adaptive_localization.py
+++ b/tests/unit_tests/analysis/test_adaptive_localization.py
@@ -1,4 +1,6 @@
+import uuid
 from argparse import ArgumentParser
+from functools import partial
 from textwrap import dedent
 
 import numpy as np
@@ -16,7 +18,7 @@ def run_cli_ES_with_case(poly_config):
     config_name = poly_config.split(".")[0]
     prior_sample_name = "prior_sample" + "_" + config_name
-    posterior_sample_name = "posterior_sample" + "_" + config_name
+    posterior_sample_name = str(uuid.uuid1())
     parser = ArgumentParser(prog="test_main")
     parsed = ert_parser(
         parser,
@@ -101,6 +103,34 @@ def test_that_adaptive_localization_with_cutoff_0_equals_ESupdate(copy_poly_case
     assert np.allclose(posterior_sample_loc0, posterior_sample_noloc)
 
 
+def test_benchmark(copy_poly_case, benchmark):
+    # rng = np.random.default_rng(42)
+    # cutoff1 = rng.uniform(0, 1)
+    cutoff1 = 0.5
+
+    set_adaptive_localization_cutoff1 = dedent(
+        f"""
+        ANALYSIS_SET_VAR STD_ENKF LOCALIZATION True
+        ANALYSIS_SET_VAR STD_ENKF LOCALIZATION_CORRELATION_THRESHOLD {cutoff1}
+        """
+    )
+
+    with open("poly.ert", "r+", encoding="utf-8") as f:
+        lines = f.readlines()
+        for i, line in enumerate(lines):
+            if "NUM_REALIZATIONS 100" in line:
+                lines[i] = "NUM_REALIZATIONS 200\n"
+                break
+        lines.insert(2, random_seed_line)
+        lines.insert(9, set_adaptive_localization_cutoff1)
+
+    with open("poly_localization_cutoff1.ert", "w", encoding="utf-8") as f:
+        f.writelines(lines)
+
+    run_with_cutoff1 = partial(run_cli_ES_with_case, "poly_localization_cutoff1.ert")
+    benchmark(run_with_cutoff1)
+
+
 @pytest.mark.integration_test
 def test_that_posterior_generalized_variance_increases_in_cutoff(copy_poly_case):
     rng = np.random.default_rng(42)
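
For reference, a minimal sketch of the pytest-benchmark fixture pattern that the new test_benchmark relies on, assuming the pytest-benchmark plugin is installed. The names expensive_update and test_benchmark_sketch are hypothetical stand-ins for run_cli_ES_with_case and the real test; only the fixture usage is taken from the diff above.

    from functools import partial


    def expensive_update(config_file):
        # Hypothetical stand-in for run_cli_ES_with_case: a cheap, deterministic
        # workload so the sketch runs without an ert installation. The config_file
        # argument is accepted only to mirror the real call signature.
        return sum(i * i for i in range(100_000))


    def test_benchmark_sketch(benchmark):
        # The `benchmark` fixture (provided by the pytest-benchmark plugin) calls
        # the given callable repeatedly, records timing statistics, and returns
        # the callable's own return value. `partial` binds the config-file
        # argument so the benchmarked callable takes no arguments.
        result = benchmark(partial(expensive_update, "poly_localization_cutoff1.ert"))
        assert result > 0

Run with `pytest --benchmark-json output.json` to produce the JSON file consumed by the github-action-benchmark step in the workflow above.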