From 7f56c578b227aec0a373a44dac4376f75431c4a3 Mon Sep 17 00:00:00 2001
From: Frode Aarstad
Date: Mon, 7 Oct 2024 14:05:32 +0200
Subject: [PATCH] Refactor the last usage of @tmpdir

---
 tests/everest/__init__.py | 3 -
 tests/everest/conftest.py | 28 ++
 tests/everest/entry_points/test_everexport.py | 24 +--
 .../functional/test_main_everest_entry.py | 7 +-
 tests/everest/test_config_file_loader.py | 18 +-
 tests/everest/test_controls.py | 46 +++--
 tests/everest/test_detached.py | 7 +-
 tests/everest/test_egg_simulation.py | 29 ++--
 tests/everest/test_everest_initialization.py | 17 +-
 tests/everest/test_everest_output.py | 4 +-
 tests/everest/test_export.py | 9 +-
 tests/everest/test_multiobjective.py | 14 +-
 tests/everest/test_optimization_config.py | 4 +-
 tests/everest/test_output_constraints.py | 14 +-
 tests/everest/test_res_initialization.py | 157 ++++++++----------
 tests/everest/test_ropt_initialization.py | 55 ++----
 tests/everest/test_templating.py | 18 +-
 tests/everest/test_ui_run.py | 5 +-
 tests/everest/test_workflows.py | 5 +-
 tests/everest/test_yaml_parser.py | 20 +--
 .../everest/unit/everest/bin/test_everload.py | 36 ++--
 tests/everest/utils/__init__.py | 56 -------
 22 files changed, 223 insertions(+), 353 deletions(-)

diff --git a/tests/everest/__init__.py b/tests/everest/__init__.py
index 06b590ab516..e69de29bb2d 100644
--- a/tests/everest/__init__.py
+++ b/tests/everest/__init__.py
@@ -1,3 +0,0 @@
-from .utils import tmp
-
-__all__ = ["tmp"]
diff --git a/tests/everest/conftest.py b/tests/everest/conftest.py
index b17be861b51..62dc53d533c 100644
--- a/tests/everest/conftest.py
+++ b/tests/everest/conftest.py
@@ -92,6 +92,34 @@ def copy_math_func_test_data_to_tmp(tmp_path, monkeypatch):
     monkeypatch.chdir(tmp_path)
 
 
+@pytest.fixture
+def copy_mocked_test_data_to_tmp(tmp_path, monkeypatch):
+    path = relpath("test_data", "mocked_test_case")
+    shutil.copytree(path, tmp_path, dirs_exist_ok=True)
+    monkeypatch.chdir(tmp_path)
+
+
+@pytest.fixture
+def copy_test_data_to_tmp(tmp_path, monkeypatch):
+    path = relpath("test_data")
+    shutil.copytree(path, tmp_path, dirs_exist_ok=True)
+    monkeypatch.chdir(tmp_path)
+
+
+@pytest.fixture
+def copy_template_test_data_to_tmp(tmp_path, monkeypatch):
+    path = relpath("test_data", "templating")
+    shutil.copytree(path, tmp_path, dirs_exist_ok=True)
+    monkeypatch.chdir(tmp_path)
+
+
+@pytest.fixture
+def copy_egg_test_data_to_tmp(tmp_path, monkeypatch):
+    path = relpath("..", "..", "test-data", "everest", "egg")
+    shutil.copytree(path, tmp_path, dirs_exist_ok=True)
+    monkeypatch.chdir(tmp_path)
+
+
 @pytest.fixture
 def change_to_tmpdir(tmp_path, monkeypatch):
     monkeypatch.chdir(tmp_path)
diff --git a/tests/everest/entry_points/test_everexport.py b/tests/everest/entry_points/test_everexport.py
index 1fad072424b..383d50c5c5a 100644
--- a/tests/everest/entry_points/test_everexport.py
+++ b/tests/everest/entry_points/test_everexport.py
@@ -13,16 +13,14 @@
 from everest.config import EverestConfig
 from tests.everest.utils import (
     create_cached_mocked_test_case,
-    relpath,
     satisfy,
     satisfy_callable,
-    tmpdir,
 )
 
 CONFIG_FILE_MINIMAL = "config_minimal.yml"
 CONFIG_FILE_MOCKED_TEST_CASE = "mocked_multi_batch.yml"
-CONFIG_PATH_MOCKED_TEST_CASE = relpath("test_data", "mocked_test_case")
+
 
 pytestmark = pytest.mark.xdist_group(name="starts_everest")
 
 
@@ -167,9 +165,10 @@ def test_everexport_entry_empty_export(mocked_func, copy_math_func_test_data_to_
 
 
 @patch("everest.bin.utils.export")
-@tmpdir(CONFIG_PATH_MOCKED_TEST_CASE)
 @pytest.mark.fails_on_macos_github_workflow
-def 
test_everexport_entry_no_usr_def_ecl_keys(mocked_func): +def test_everexport_entry_no_usr_def_ecl_keys( + mocked_func, copy_mocked_test_data_to_tmp +): """Test running everexport with config file containing only the keywords label without any list of keys""" @@ -200,9 +199,10 @@ def condition(config: EverestConfig): @patch("everest.bin.utils.export") -@tmpdir(CONFIG_PATH_MOCKED_TEST_CASE) @pytest.mark.fails_on_macos_github_workflow -def test_everexport_entry_internalized_usr_def_ecl_keys(mocked_func, cache_dir): +def test_everexport_entry_internalized_usr_def_ecl_keys( + mocked_func, cache_dir, copy_mocked_test_data_to_tmp +): """Test running everexport with config file containing a key in the list of user defined ecl keywords, that has been internalized on a previous run""" @@ -242,9 +242,10 @@ def condition(config: EverestConfig): @patch("everest.bin.utils.export") -@tmpdir(CONFIG_PATH_MOCKED_TEST_CASE) @pytest.mark.fails_on_macos_github_workflow -def test_everexport_entry_non_int_usr_def_ecl_keys(mocked_func, cache_dir, caplog): +def test_everexport_entry_non_int_usr_def_ecl_keys( + mocked_func, cache_dir, caplog, copy_mocked_test_data_to_tmp +): """Test running everexport when config file contains non internalized ecl keys in the user defined keywords list""" @@ -290,9 +291,10 @@ def condition(config: EverestConfig): @patch("everest.bin.utils.export") -@tmpdir(CONFIG_PATH_MOCKED_TEST_CASE) @pytest.mark.fails_on_macos_github_workflow -def test_everexport_entry_not_available_batches(mocked_func, cache_dir, caplog): +def test_everexport_entry_not_available_batches( + mocked_func, cache_dir, caplog, copy_mocked_test_data_to_tmp +): """Test running everexport when config file contains non existing batch numbers in the list of user defined batches""" diff --git a/tests/everest/functional/test_main_everest_entry.py b/tests/everest/functional/test_main_everest_entry.py index a993aad8924..279e63bc3b0 100644 --- a/tests/everest/functional/test_main_everest_entry.py +++ b/tests/everest/functional/test_main_everest_entry.py @@ -6,9 +6,7 @@ from seba_sqlite.snapshot import SebaSnapshot from tests.everest.utils import ( capture_streams, - relpath, skipif_no_everest_models, - tmpdir, ) from everest import __version__ as everest_version @@ -22,8 +20,6 @@ ) CONFIG_FILE_MINIMAL = "config_minimal.yml" - -EGG_CONFIG_PATH = relpath("..", "..", "test-data", "everest", "egg") WELL_ORDER = "everest/model/config.yml" pytestmark = pytest.mark.xdist_group(name="starts_everest") @@ -167,11 +163,10 @@ def test_everest_main_lint_entry(copy_math_func_test_data_to_tmp): assert validation_msg in err.getvalue() -@tmpdir(EGG_CONFIG_PATH) @pytest.mark.fails_on_macos_github_workflow @skipif_no_everest_models @pytest.mark.everest_models_test -def test_everest_main_configdump_entry(): +def test_everest_main_configdump_entry(copy_egg_test_data_to_tmp): # Setup command line arguments with capture_streams() as (out, _): start_everest(["everest", "render", WELL_ORDER]) diff --git a/tests/everest/test_config_file_loader.py b/tests/everest/test_config_file_loader.py index b13f9ff4bde..d1711f89c00 100644 --- a/tests/everest/test_config_file_loader.py +++ b/tests/everest/test_config_file_loader.py @@ -9,7 +9,7 @@ from everest import ConfigKeys as CK from everest import config_file_loader as loader from everest.config import EverestConfig -from tests.everest.utils import relpath, tmpdir +from tests.everest.utils import relpath mocked_root = relpath(os.path.join("test_data", "mocked_test_case")) @@ -57,11 +57,9 @@ def 
test_load_config_as_yaml(): assert rendered_template is not None -@tmpdir(mocked_root) -def test_configpath_in_defs(): +def test_configpath_in_defs(copy_mocked_test_data_to_tmp): config_file = "mocked_multi_batch.yml" config = EverestConfig.load_file(config_file) - defs = { "numeric_key": 1, "bool_key": True, @@ -73,8 +71,7 @@ def test_configpath_in_defs(): assert defs == config.definitions -@tmpdir(mocked_root) -def test_dependent_definitions(): +def test_dependent_definitions(copy_mocked_test_data_to_tmp): config_file = "mocked_multi_batch.yml" with open(config_file, encoding="utf-8") as f: raw_config = YAML(typ="safe", pure=True).load(f) @@ -103,8 +100,7 @@ def test_dependent_definitions(): assert defs == config.definitions -@tmpdir(mocked_root) -def test_dependent_definitions_value_error(): +def test_dependent_definitions_value_error(copy_mocked_test_data_to_tmp): config_file = "mocked_multi_batch.yml" with open(config_file, encoding="utf-8") as f: raw_config = YAML(typ="safe", pure=True).load(f) @@ -121,16 +117,14 @@ def test_dependent_definitions_value_error(): EverestConfig.load_file(config_file) -@tmpdir(mocked_root) -def test_load_empty_configuration(): +def test_load_empty_configuration(copy_mocked_test_data_to_tmp): with open("empty_config.yml", mode="w", encoding="utf-8") as fh: fh.writelines("") with pytest.raises(ValidationError, match="missing"): EverestConfig.load_file("empty_config.yml") -@tmpdir(mocked_root) -def test_load_invalid_configuration(): +def test_load_invalid_configuration(copy_mocked_test_data_to_tmp): with open("invalid_config.yml", mode="w", encoding="utf-8") as fh: fh.writelines("asdf") with pytest.raises(ValidationError, match="missing"): diff --git a/tests/everest/test_controls.py b/tests/everest/test_controls.py index 90650632122..a62cf91cd78 100644 --- a/tests/everest/test_controls.py +++ b/tests/everest/test_controls.py @@ -16,7 +16,7 @@ ) from everest.config.input_constraint_config import InputConstraintConfig from everest.config.well_config import WellConfig -from tests.everest.utils import relpath, tmp, tmpdir +from tests.everest.utils import relpath cfg_dir = relpath("test_data", "mocked_test_case") mocked_config = relpath(cfg_dir, "mocked_test_case.yml") @@ -114,8 +114,7 @@ def _perturb_control_zero( return exp_var_def -@tmpdir(relpath("test_data")) -def test_variable_name_index_validation(): +def test_variable_name_index_validation(copy_test_data_to_tmp): config = EverestConfig.load_file( os.path.join("mocked_test_case", "mocked_test_case.yml") ) @@ -174,7 +173,7 @@ def test_variable_name_index_validation(): @pytest.mark.integration_test -def test_individual_control_variable_config(): +def test_individual_control_variable_config(copy_test_data_to_tmp): config_file = os.path.join("mocked_test_case", "config_input_constraints.yml") global_min = (0, 0.7, 1.3, None) @@ -184,26 +183,25 @@ def test_individual_control_variable_config(): test_base = (global_min, global_max, global_init, fill_missing) for gmin, gmax, ginit, fill in itertools.product(*test_base): - with tmp(relpath("test_data")): - config = EverestConfig.load_file(config_file) - exp_var_def = _perturb_control_zero(config, gmin, gmax, ginit, fill) - - # Not complete configuration - if None in [gmin, gmax, ginit] and not fill: - with pytest.raises(expected_exception=ValidationError): - EverestConfig.model_validate(config.to_dict()) - continue - - # Invalid parameters - def valid_control(var: ControlVariableConfig) -> bool: - return var.min <= var.initial_guess <= var.max - - if not 
all(map(valid_control, exp_var_def)): - with pytest.raises(expected_exception=ValidationError): - EverestConfig.model_validate(config.to_dict()) - continue - - EverestConfig.model_validate(config.to_dict()) + config = EverestConfig.load_file(config_file) + exp_var_def = _perturb_control_zero(config, gmin, gmax, ginit, fill) + + # Not complete configuration + if None in [gmin, gmax, ginit] and not fill: + with pytest.raises(expected_exception=ValidationError): + EverestConfig.model_validate(config.to_dict()) + continue + + # Invalid parameters + def valid_control(var: ControlVariableConfig) -> bool: + return var.min <= var.initial_guess <= var.max + + if not all(map(valid_control, exp_var_def)): + with pytest.raises(expected_exception=ValidationError): + EverestConfig.model_validate(config.to_dict()) + continue + + EverestConfig.model_validate(config.to_dict()) def test_control_variable_name(): diff --git a/tests/everest/test_detached.py b/tests/everest/test_detached.py index 7d8d973b980..7554aa56e78 100644 --- a/tests/everest/test_detached.py +++ b/tests/everest/test_detached.py @@ -37,7 +37,6 @@ SIMULATION_DIR, ) from everest.util import makedirs_if_needed -from tests.everest.utils import relpath, tmpdir class MockContext: @@ -155,8 +154,10 @@ def test_server_status(copy_math_func_test_data_to_tmp): @patch("everest.detached.server_is_running", return_value=False) -@tmpdir(relpath("test_data", "detached")) -def test_wait_for_server(server_is_running_mock, caplog): +def test_wait_for_server( + server_is_running_mock, caplog, copy_test_data_to_tmp, monkeypatch +): + monkeypatch.chdir("detached") config = EverestConfig.load_file("valid_yaml_config.yml") with caplog.at_level(logging.DEBUG), pytest.raises(Exception): diff --git a/tests/everest/test_egg_simulation.py b/tests/everest/test_egg_simulation.py index f810322d9ff..5b913ed0497 100644 --- a/tests/everest/test_egg_simulation.py +++ b/tests/everest/test_egg_simulation.py @@ -1,6 +1,5 @@ import json import os -import shutil import pytest @@ -15,11 +14,9 @@ from tests.everest.utils import ( everest_default_jobs, hide_opm, - relpath, skipif_no_everest_models, skipif_no_opm, skipif_no_simulator, - tmpdir, ) CONFIG_FILE = "everest/model/config.yml" @@ -461,7 +458,6 @@ for group in ["FIELD", "INJECT", "PRODUC"] ] ] -ROOT = os.path.join("..", "..", "test-data", "everest", "egg") def sort_res_summary(ert_config): @@ -565,8 +561,7 @@ def _generate_exp_ert_config(config_path, output_dir): @skipif_no_opm -@tmpdir(relpath(ROOT)) -def test_egg_model_convert(): +def test_egg_model_convert(copy_egg_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) ert_config = everest_to_ert_config(config) @@ -581,10 +576,9 @@ def test_egg_model_convert(): @hide_opm -@tmpdir(relpath(ROOT)) @skipif_no_everest_models @pytest.mark.everest_models_test -def test_egg_model_convert_no_opm(): +def test_egg_model_convert_no_opm(copy_egg_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) ert_config = everest_to_ert_config(config) @@ -601,8 +595,7 @@ def test_egg_model_convert_no_opm(): @skipif_no_everest_models @pytest.mark.everest_models_test -@tmpdir(relpath(ROOT)) -def test_opm_fail_default_summary_keys(): +def test_opm_fail_default_summary_keys(copy_egg_test_data_to_tmp): pytest.importorskip("everest_models") config = EverestConfig.load_file(CONFIG_FILE) @@ -628,8 +621,7 @@ def test_opm_fail_default_summary_keys(): @skipif_no_everest_models @pytest.mark.everest_models_test @skipif_no_opm -@tmpdir(relpath(ROOT)) -def 
test_opm_fail_explicit_summary_keys():
+def test_opm_fail_explicit_summary_keys(copy_egg_test_data_to_tmp):
     extra_sum_keys = [
         "GOIR:PRODUC",
         "GOIT:INJECT",
@@ -674,8 +666,7 @@ def test_opm_fail_explicit_summary_keys():
 @skipif_no_everest_models
 @pytest.mark.everest_models_test
 @pytest.mark.integration_test
-@tmpdir(relpath(ROOT))
-def test_init_egg_model():
+def test_init_egg_model(copy_egg_test_data_to_tmp):
     config = EverestConfig.load_file(CONFIG_FILE)
     ert_config = everest_to_ert_config(config, site_config=ErtConfig.read_site_config())
     ErtConfig.with_plugins().from_dict(config_dict=ert_config)
@@ -685,8 +676,7 @@
 @pytest.mark.everest_models_test
 @skipif_no_simulator
 @pytest.mark.simulation_test
-@tmpdir(relpath(ROOT))
-def test_run_egg_model():
+def test_run_egg_model(copy_egg_test_data_to_tmp):
     config = EverestConfig.load_file(CONFIG_FILE)
 
     # test callback
@@ -791,8 +781,7 @@ def sweetcallbackofmine(self, *args, **kwargs):
 @skipif_no_everest_models
 @pytest.mark.everest_models_test
 @skipif_no_opm
-@tmpdir(relpath(ROOT))
-def test_egg_model_wells_json_output_no_none():
+def test_egg_model_wells_json_output_no_none(copy_egg_test_data_to_tmp):
     config = EverestConfig.load_file(CONFIG_FILE)
     _ = everest_to_ert_config(config)
 
@@ -812,9 +801,7 @@ def test_egg_model_wells_json_output_no_none():
 @skipif_no_simulator
 @pytest.mark.simulation_test
 @pytest.mark.timeout(0)
-def test_egg_snapshot(tmp_path, snapshot, monkeypatch):
-    shutil.copytree(relpath(ROOT), tmp_path, dirs_exist_ok=True)
-    monkeypatch.chdir(tmp_path)
+def test_egg_snapshot(snapshot, copy_egg_test_data_to_tmp):
     config = EverestConfig.load_file(CONFIG_FILE)
 
     class CBTracker(object):
diff --git a/tests/everest/test_everest_initialization.py b/tests/everest/test_everest_initialization.py
index 67918206d39..e6e8e060deb 100644
--- a/tests/everest/test_everest_initialization.py
+++ b/tests/everest/test_everest_initialization.py
@@ -4,7 +4,6 @@
 
 from everest.config import EverestConfig
 from everest.suite import _EverestWorkflow
-from tests.everest.utils import relpath, tmp
 
 NO_PROJECT_RES = (
     os.environ.get("NO_PROJECT_RES", False),
@@ -13,20 +12,16 @@
 
 
 @pytest.mark.skipif(NO_PROJECT_RES[0], reason=NO_PROJECT_RES[1])
-def test_init_no_project_res():
-    root_dir = relpath("..", "..", "test-data", "everest", "egg")
+def test_init_no_project_res(copy_egg_test_data_to_tmp):
     config_file = os.path.join("everest", "model", "config.yml")
-    with tmp(root_dir):
-        config_dict = EverestConfig.load_file(config_file)
-        _EverestWorkflow(config_dict)
+    config_dict = EverestConfig.load_file(config_file)
+    _EverestWorkflow(config_dict)
 
 
-def test_init():
-    root_dir = relpath("test_data", "mocked_test_case")
+def test_init(copy_mocked_test_data_to_tmp):
     config_file = os.path.join("mocked_test_case.yml")
-    with tmp(root_dir):
-        config_dict = EverestConfig.load_file(config_file)
-        _EverestWorkflow(config_dict)
+    config_dict = EverestConfig.load_file(config_file)
+    _EverestWorkflow(config_dict)
 
 
 def test_no_config_init():
diff --git a/tests/everest/test_everest_output.py b/tests/everest/test_everest_output.py
index cd83305f517..8fc1f23ee60 100644
--- a/tests/everest/test_everest_output.py
+++ b/tests/everest/test_everest_output.py
@@ -17,7 +17,6 @@
 )
 from everest.suite import _EverestWorkflow
 from everest.util import makedirs_if_needed
-from tests.everest.utils import relpath, tmpdir
 
 
 def test_that_one_experiment_creates_one_ensemble_per_batch(
@@ 
-44,8 +43,7 @@ def test_that_one_experiment_creates_one_ensemble_per_batch( @pytest.mark.integration_test @patch("ert.simulator.BatchSimulator.start", return_value=None) -@tmpdir(relpath("test_data", "mocked_test_case")) -def test_everest_output(start_mock): +def test_everest_output(start_mock, copy_mocked_test_data_to_tmp): config_folder = os.getcwd() config = EverestConfig.load_file("mocked_test_case.yml") everest_output_dir = config.output_dir diff --git a/tests/everest/test_export.py b/tests/everest/test_export.py index 4d4083dc973..3652c668ff2 100644 --- a/tests/everest/test_export.py +++ b/tests/everest/test_export.py @@ -9,10 +9,9 @@ from everest.config import EverestConfig from everest.config.export_config import ExportConfig from everest.export import export, validate_export -from tests.everest.utils import create_cached_mocked_test_case, relpath, tmpdir +from tests.everest.utils import create_cached_mocked_test_case, relpath CONFIG_FILE_MOCKED_TEST_CASE = "mocked_multi_batch.yml" -CONFIG_PATH_MOCKED_TEST_CASE = relpath("test_data", "mocked_test_case") CASHED_RESULTS_FOLDER = relpath("test_data", "cached_results_config_multiobj") CONFIG_FILE = "config_multiobj.yml" DATA = pd.DataFrame( @@ -173,9 +172,8 @@ def test_export_only_give_batches(copy_math_func_test_data_to_tmp): assert id == 2 -@tmpdir(CONFIG_PATH_MOCKED_TEST_CASE) @pytest.mark.fails_on_macos_github_workflow -def test_export_batches_progress(cache_dir): +def test_export_batches_progress(cache_dir, copy_mocked_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_MOCKED_TEST_CASE) shutil.copytree( @@ -265,9 +263,8 @@ def test_get_export_path(copy_math_func_test_data_to_tmp): assert expected_export_path == new_config.export_path -@tmpdir(CONFIG_PATH_MOCKED_TEST_CASE) @pytest.mark.fails_on_macos_github_workflow -def test_validate_export(cache_dir): +def test_validate_export(cache_dir, copy_mocked_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE_MOCKED_TEST_CASE) shutil.copytree( diff --git a/tests/everest/test_multiobjective.py b/tests/everest/test_multiobjective.py index d743a07c1f9..9f2d8df4901 100644 --- a/tests/everest/test_multiobjective.py +++ b/tests/everest/test_multiobjective.py @@ -7,14 +7,11 @@ from everest.simulator.everest_to_ert import everest_to_ert_config from everest.suite import _EverestWorkflow from tests.everest.test_config_validation import has_error -from tests.everest.utils import relpath, tmpdir -CONFIG_DIR = relpath("test_data", "mocked_test_case") CONFIG_FILE = "config_multi_objectives.yml" -@tmpdir(CONFIG_DIR) -def test_config_multi_objectives(): +def test_config_multi_objectives(copy_mocked_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) config_dict = config.to_dict() @@ -71,15 +68,13 @@ def test_config_multi_objectives(): _EverestWorkflow(config) -@tmpdir(CONFIG_DIR) -def test_multi_objectives2res(): +def test_multi_objectives2res(copy_mocked_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) res = everest_to_ert_config(config, site_config=ErtConfig.read_site_config()) ErtConfig.with_plugins().from_dict(config_dict=res) -@tmpdir(CONFIG_DIR) -def test_multi_objectives2ropt(): +def test_multi_objectives2ropt(copy_mocked_test_data_to_tmp): # pylint: disable=unbalanced-tuple-unpacking config = EverestConfig.load_file(CONFIG_FILE) config_dict = config.to_dict() @@ -103,8 +98,7 @@ def test_multi_objectives2ropt(): @pytest.mark.integration_test -@tmpdir(CONFIG_DIR) -def test_multi_objectives_run(): +def 
test_multi_objectives_run(copy_mocked_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) workflow = _EverestWorkflow(config) workflow.start_optimization() diff --git a/tests/everest/test_optimization_config.py b/tests/everest/test_optimization_config.py index d7cb30caf88..7283d553c32 100644 --- a/tests/everest/test_optimization_config.py +++ b/tests/everest/test_optimization_config.py @@ -3,11 +3,9 @@ import pytest from everest.config import EverestConfig -from tests.everest.utils import relpath, tmpdir -@tmpdir(relpath("test_data")) -def test_optimization_config(): +def test_optimization_config(copy_test_data_to_tmp): config_directory = "mocked_test_case" cfg = os.path.join(config_directory, "config_full_gradient_info.yml") full_config_dict = EverestConfig.load_file(cfg) diff --git a/tests/everest/test_output_constraints.py b/tests/everest/test_output_constraints.py index a1239468457..49ef017bd5d 100644 --- a/tests/everest/test_output_constraints.py +++ b/tests/everest/test_output_constraints.py @@ -10,14 +10,11 @@ from everest.suite import _EverestWorkflow from .test_config_validation import has_error -from .utils import relpath, tmpdir -CONFIG_DIR = relpath("test_data", "mocked_test_case") CONFIG_FILE = "config_output_constraints.yml" -@tmpdir(CONFIG_DIR) -def test_constraints_init(): +def test_constraints_init(copy_mocked_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) constr = list(config.output_constraints or []) @@ -45,8 +42,7 @@ def test_constraints_init(): assert [cn.scale for cn in constr] == 16 * [7500] -@tmpdir(CONFIG_DIR) -def test_wrong_output_constr_def(): +def test_wrong_output_constr_def(copy_mocked_test_data_to_tmp): # No RHS errors = EverestConfig.lint_config_dict( { @@ -188,8 +184,7 @@ def test_wrong_output_constr_def(): assert has_error(errors, "unable to parse string as a number") -@tmpdir(CONFIG_DIR) -def test_upper_bound_output_constraint_def(): +def test_upper_bound_output_constraint_def(copy_mocked_test_data_to_tmp): with open("conf_file", "w", encoding="utf-8") as f: f.write(" ") @@ -237,8 +232,7 @@ def test_upper_bound_output_constraint_def(): @pytest.mark.integration_test -@tmpdir(CONFIG_DIR) -def test_sim_output_constraints(): +def test_sim_output_constraints(copy_mocked_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) workflow = _EverestWorkflow(config) assert workflow is not None diff --git a/tests/everest/test_res_initialization.py b/tests/everest/test_res_initialization.py index a006feec338..92af23134eb 100644 --- a/tests/everest/test_res_initialization.py +++ b/tests/everest/test_res_initialization.py @@ -20,15 +20,12 @@ relpath, skipif_no_everest_models, skipif_no_opm, - tmp, - tmpdir, ) NO_PROJECT_RES = ( os.environ.get("NO_PROJECT_RES", False), "Skipping tests when no access to /project/res", ) - SNAKE_CONFIG_DIR = "snake_oil/everest/model" SNAKE_CONFIG_PATH = os.path.join(SNAKE_CONFIG_DIR, "snake_oil.yml") TUTORIAL_CONFIG_DIR = "mocked_test_case" @@ -213,8 +210,7 @@ def build_tutorial_dict(config_dir, output_dir): } -@tmpdir(relpath("test_data")) -def test_snake_everest_to_ert(): +def test_snake_everest_to_ert(copy_test_data_to_tmp): # Load config file ever_config_dict = EverestConfig.load_file(SNAKE_CONFIG_PATH) @@ -233,8 +229,7 @@ def test_snake_everest_to_ert(): ) -@tmpdir(relpath("test_data")) -def test_snake_everest_to_ert_slurm(): +def test_snake_everest_to_ert_slurm(copy_test_data_to_tmp): snake_slurm_config_path = os.path.join(SNAKE_CONFIG_DIR, "snake_oil_slurm.yml") # Load config file 
ever_config_dict = EverestConfig.load_file(snake_slurm_config_path) @@ -255,8 +250,7 @@ def test_snake_everest_to_ert_slurm(): @patch.dict("os.environ", {"USER": "NO_USERNAME"}) -@tmpdir(relpath("test_data")) -def test_tutorial_everest_to_ert(): +def test_tutorial_everest_to_ert(copy_test_data_to_tmp): tutorial_config_path = os.path.join(TUTORIAL_CONFIG_DIR, "mocked_test_case.yml") # Load config file ever_config_dict = EverestConfig.load_file(tutorial_config_path) @@ -279,8 +273,7 @@ def test_tutorial_everest_to_ert(): @skipif_no_opm -@tmpdir(relpath("test_data")) -def test_combined_wells_everest_to_ert(): +def test_combined_wells_everest_to_ert(copy_test_data_to_tmp): config_mocked_multi_batch = os.path.join( TUTORIAL_CONFIG_DIR, "mocked_multi_batch.yml" ) @@ -303,8 +296,7 @@ def test_combined_wells_everest_to_ert(): assert any(inj_in_strings) -@tmpdir(relpath("test_data")) -def test_lsf_queue_system(): +def test_lsf_queue_system(copy_test_data_to_tmp): snake_all_path = os.path.join(SNAKE_CONFIG_DIR, "snake_oil_all.yml") ever_config = EverestConfig.load_file(snake_all_path) @@ -316,8 +308,7 @@ def test_lsf_queue_system(): assert queue_system == "LSF" -@tmpdir(relpath("test_data")) -def test_queue_configuration(): +def test_queue_configuration(copy_test_data_to_tmp): snake_all_path = os.path.join(SNAKE_CONFIG_DIR, "snake_oil_all.yml") ever_config = EverestConfig.load_file(snake_all_path) @@ -353,7 +344,7 @@ def test_queue_config(): @patch.dict("os.environ", {"USER": "NO_USERNAME"}) -def test_install_data_no_init(): +def test_install_data_no_init(copy_test_data_to_tmp): """ TODO: When default jobs are handled in Everest this test should be deleted as it is superseded by test_install_data. @@ -365,44 +356,42 @@ def test_install_data_no_init(): test_base = list(zip(sources, targets, links, cmd_list)) tutorial_config_path = os.path.join(TUTORIAL_CONFIG_DIR, "mocked_test_case.yml") for source, target, link, cmd in test_base[1:2]: - with tmp(relpath("test_data")): - ever_config = EverestConfig.load_file(tutorial_config_path) - - if ever_config.install_data is None: - ever_config.install_data = [] - - ever_config.install_data.append( - InstallDataConfig( - source=source, - target=target, - link=link, - ) + ever_config = EverestConfig.load_file(tutorial_config_path) + + if ever_config.install_data is None: + ever_config.install_data = [] + + ever_config.install_data.append( + InstallDataConfig( + source=source, + target=target, + link=link, ) + ) - errors = EverestConfig.lint_config_dict(ever_config.to_dict()) - assert len(errors) == 0 + errors = EverestConfig.lint_config_dict(ever_config.to_dict()) + assert len(errors) == 0 - ert_config_dict = everest_to_ert_config(ever_config) + ert_config_dict = everest_to_ert_config(ever_config) - output_dir = ever_config.output_dir - tutorial_dict = build_tutorial_dict( - os.path.abspath(TUTORIAL_CONFIG_DIR), output_dir - ) + output_dir = ever_config.output_dir + tutorial_dict = build_tutorial_dict( + os.path.abspath(TUTORIAL_CONFIG_DIR), output_dir + ) - config_dir = ever_config.config_directory - tutorial_dict["SIMULATION_JOB"].insert( - 0, - (cmd, os.path.join(config_dir, source), target), - ) - assert tutorial_dict == ert_config_dict + config_dir = ever_config.config_directory + tutorial_dict["SIMULATION_JOB"].insert( + 0, + (cmd, os.path.join(config_dir, source), target), + ) + assert tutorial_dict == ert_config_dict @skipif_no_opm @skipif_no_everest_models @pytest.mark.everest_models_test @pytest.mark.integration_test 
-@tmpdir(relpath("../../test-data/everest/egg")) -def test_summary_default(): +def test_summary_default(copy_egg_test_data_to_tmp): config_dir = "everest/model" config_file = os.path.join(config_dir, "config.yml") everconf = EverestConfig.load_file(config_file) @@ -435,8 +424,7 @@ def test_summary_default(): @skipif_no_everest_models @pytest.mark.everest_models_test @pytest.mark.fails_on_macos_github_workflow -@tmpdir(relpath("../../test-data/everest/egg")) -def test_summary_default_no_opm(): +def test_summary_default_no_opm(copy_egg_test_data_to_tmp): config_dir = "everest/model" config_file = os.path.join(config_dir, "config.yml") everconf = EverestConfig.load_file(config_file) @@ -460,7 +448,7 @@ def test_summary_default_no_opm(): @pytest.mark.simulation_test -def test_install_data(): +def test_install_data(copy_test_data_to_tmp): """ TODO: When default jobs are handled in Everest this test should not be a simulation test. @@ -473,46 +461,44 @@ def test_install_data(): test_base = zip(sources, targets, links, cmds) tutorial_config_path = os.path.join(TUTORIAL_CONFIG_DIR, "mocked_test_case.yml") for source, target, link, cmd in test_base: - with tmp(relpath("test_data")): - ever_config = EverestConfig.load_file(tutorial_config_path) - - if ever_config.install_data is None: - ever_config.install_data = [] - - ever_config.install_data.append( - InstallDataConfig( - source=source, - target=target, - link=link, - ) + ever_config = EverestConfig.load_file(tutorial_config_path) + + if ever_config.install_data is None: + ever_config.install_data = [] + + ever_config.install_data.append( + InstallDataConfig( + source=source, + target=target, + link=link, ) + ) - errors = EverestConfig.lint_config_dict(ever_config.to_dict()) - assert len(errors) == 0 + errors = EverestConfig.lint_config_dict(ever_config.to_dict()) + assert len(errors) == 0 - ert_config_dict = everest_to_ert_config(ever_config) + ert_config_dict = everest_to_ert_config(ever_config) - output_dir = ever_config.output_dir - tutorial_dict = build_tutorial_dict( - os.path.abspath(TUTORIAL_CONFIG_DIR), output_dir - ) - config_dir = ever_config.config_directory - tutorial_dict["SIMULATION_JOB"].insert( - 0, - (cmd, os.path.join(config_dir, source), target), - ) - assert tutorial_dict == ert_config_dict + output_dir = ever_config.output_dir + tutorial_dict = build_tutorial_dict( + os.path.abspath(TUTORIAL_CONFIG_DIR), output_dir + ) + config_dir = ever_config.config_directory + tutorial_dict["SIMULATION_JOB"].insert( + 0, + (cmd, os.path.join(config_dir, source), target), + ) + assert tutorial_dict == ert_config_dict - # Instantiate res - ErtConfig.with_plugins().from_dict( - config_dict=everest_to_ert_config( - ever_config, site_config=ErtConfig.read_site_config() - ) + # Instantiate res + ErtConfig.with_plugins().from_dict( + config_dict=everest_to_ert_config( + ever_config, site_config=ErtConfig.read_site_config() ) + ) -@tmpdir(relpath("test_data")) -def test_strip_date_job_insertion(): +def test_strip_date_job_insertion(copy_test_data_to_tmp): # Load config file ever_config = EverestConfig.load_file(SNAKE_CONFIG_PATH) ever_config.model.report_steps = [ @@ -530,8 +516,7 @@ def test_strip_date_job_insertion(): assert snake_dict == ert_config_dict -@tmpdir(relpath("test_data")) -def test_forward_model_job_insertion(): +def test_forward_model_job_insertion(copy_test_data_to_tmp): # Load config file ever_config = EverestConfig.load_file(SNAKE_CONFIG_PATH) @@ -544,8 +529,7 @@ def test_forward_model_job_insertion(): assert res_job in jobs 
-@tmpdir(relpath("test_data")) -def test_workflow_job(): +def test_workflow_job(copy_test_data_to_tmp): workflow_jobs = [{"name": "test", "source": "jobs/TEST"}] ever_config = EverestConfig.load_file(SNAKE_CONFIG_PATH) ever_config.install_workflow_jobs = workflow_jobs @@ -558,8 +542,7 @@ def test_workflow_job(): ) -@tmpdir(relpath("test_data")) -def test_workflows(): +def test_workflows(copy_test_data_to_tmp): workflow_jobs = [{"name": "test", "source": "jobs/TEST"}] ever_config = EverestConfig.load_file(SNAKE_CONFIG_PATH) ever_config.install_workflow_jobs = workflow_jobs @@ -577,8 +560,7 @@ def test_workflows(): assert hooks[0] == ("pre_simulation", "PRE_SIMULATION") -@tmpdir(relpath("test_data")) -def test_user_config_jobs_precedence(): +def test_user_config_jobs_precedence(copy_test_data_to_tmp): # Load config file ever_config = EverestConfig.load_file(SNAKE_CONFIG_PATH) first_job = everest.jobs.script_names[0] @@ -594,8 +576,7 @@ def test_user_config_jobs_precedence(): assert job[0][1] == os.path.join(config_dir, "expected_source") -@tmpdir(relpath("test_data")) -def test_user_config_num_cpu(): +def test_user_config_num_cpu(copy_test_data_to_tmp): # Load config file ever_config = EverestConfig.load_file(SNAKE_CONFIG_PATH) diff --git a/tests/everest/test_ropt_initialization.py b/tests/everest/test_ropt_initialization.py index cf76477eaa5..fde52b8c81c 100644 --- a/tests/everest/test_ropt_initialization.py +++ b/tests/everest/test_ropt_initialization.py @@ -9,16 +9,14 @@ from everest.config import EverestConfig from everest.config_file_loader import yaml_file_to_substituted_config_dict from everest.optimizer.everest2ropt import everest2ropt -from tests.everest.utils import relpath, tmpdir +from tests.everest.utils import relpath _CONFIG_DIR = relpath("test_data/mocked_test_case") _CONFIG_FILE = "mocked_test_case.yml" -@tmpdir(_CONFIG_DIR) def test_tutorial_everest2ropt(): - ever_config = EverestConfig.load_file(_CONFIG_FILE) - + ever_config = EverestConfig.load_file(os.path.join(_CONFIG_DIR, _CONFIG_FILE)) ropt_config = EnOptConfig.model_validate(everest2ropt(ever_config)) realizations = ropt_config.realizations @@ -28,11 +26,8 @@ def test_tutorial_everest2ropt(): assert realizations.weights[0] == 0.5 -@tmpdir(relpath("test_data")) def test_everest2ropt_controls(): - config = EverestConfig.load_file( - os.path.join("mocked_test_case", "mocked_test_case.yml") - ) + config = EverestConfig.load_file(os.path.join(_CONFIG_DIR, _CONFIG_FILE)) controls = config.controls assert len(controls) == 1 @@ -43,11 +38,8 @@ def test_everest2ropt_controls(): assert len(ropt_config.variables.upper_bounds) == 16 -@tmpdir(relpath("test_data")) def test_everest2ropt_controls_auto_scale(): - config = EverestConfig.load_file( - os.path.join("mocked_test_case", "mocked_test_case.yml") - ) + config = EverestConfig.load_file(os.path.join(_CONFIG_DIR, _CONFIG_FILE)) controls = config.controls controls[0].auto_scale = True controls[0].scaled_range = [0.3, 0.7] @@ -56,11 +48,8 @@ def test_everest2ropt_controls_auto_scale(): assert numpy.allclose(ropt_config.variables.upper_bounds, 0.7) -@tmpdir(relpath("test_data")) def test_everest2ropt_variables_auto_scale(): - config = EverestConfig.load_file( - os.path.join("mocked_test_case", "mocked_test_case.yml") - ) + config = EverestConfig.load_file(os.path.join(_CONFIG_DIR, _CONFIG_FILE)) controls = config.controls controls[0].variables[1].auto_scale = True controls[0].variables[1].scaled_range = [0.3, 0.7] @@ -73,10 +62,9 @@ def test_everest2ropt_variables_auto_scale(): 
assert numpy.allclose(ropt_config.variables.upper_bounds[2:], 0.1) -@tmpdir(relpath("test_data")) def test_everest2ropt_controls_input_constraint(): config = EverestConfig.load_file( - os.path.join("mocked_test_case", "config_input_constraints.yml") + os.path.join(_CONFIG_DIR, "config_input_constraints.yml") ) input_constraints_ever_config = config.input_constraints # Check that there are two input constraints entries in the config @@ -100,10 +88,9 @@ def test_everest2ropt_controls_input_constraint(): assert exp_rhs == ropt_config.linear_constraints.rhs_values.tolist() -@tmpdir(relpath("test_data")) def test_everest2ropt_controls_input_constraint_auto_scale(): config = EverestConfig.load_file( - os.path.join("mocked_test_case", "config_input_constraints.yml") + os.path.join(_CONFIG_DIR, "config_input_constraints.yml") ) input_constraints_ever_config = config.input_constraints # Check that there are two input constraints entries in the config @@ -141,10 +128,8 @@ def test_everest2ropt_controls_input_constraint_auto_scale(): ) -@tmpdir(relpath("test_data")) def test_everest2ropt_controls_optimizer_setting(): - config = os.path.join("mocked_test_case", "config_full_gradient_info.yml") - + config = os.path.join(_CONFIG_DIR, "config_full_gradient_info.yml") config = EverestConfig.load_file(config) ropt_config = EnOptConfig.model_validate(everest2ropt(config)) assert len(ropt_config.realizations.names) == 15 @@ -153,19 +138,15 @@ def test_everest2ropt_controls_optimizer_setting(): assert ropt_config.realizations.names == tuple(range(15)) -@tmpdir(relpath("test_data")) def test_everest2ropt_constraints(): - config = os.path.join("mocked_test_case", "config_output_constraints.yml") + config = os.path.join(_CONFIG_DIR, "config_output_constraints.yml") config = EverestConfig.load_file(config) - ropt_config = EnOptConfig.model_validate(everest2ropt(config)) - assert len(ropt_config.nonlinear_constraints.names) == 16 -@tmpdir(relpath("test_data")) def test_everest2ropt_backend_options(): - config = os.path.join("mocked_test_case", "config_output_constraints.yml") + config = os.path.join(_CONFIG_DIR, "config_output_constraints.yml") config = EverestConfig.load_file(config) config.optimization.options = ["test = 1"] @@ -182,9 +163,8 @@ def test_everest2ropt_backend_options(): assert ropt_config.optimizer.options["test"] == 1 -@tmpdir(relpath("test_data")) def test_everest2ropt_samplers(): - config = os.path.join("mocked_test_case", "config_samplers.yml") + config = os.path.join(_CONFIG_DIR, "config_samplers.yml") config = EverestConfig.load_file(config) ropt_config = EnOptConfig.model_validate(everest2ropt(config)) @@ -203,9 +183,10 @@ def test_everest2ropt_samplers(): assert not ropt_config.samplers[idx].shared -@tmpdir(_CONFIG_DIR) def test_everest2ropt_cvar(): - config_dict = yaml_file_to_substituted_config_dict(_CONFIG_FILE) + config_dict = yaml_file_to_substituted_config_dict( + os.path.join(_CONFIG_DIR, _CONFIG_FILE) + ) config_dict["optimization"]["cvar"] = {} @@ -249,11 +230,8 @@ def test_everest2ropt_cvar(): assert ropt_config.realization_filters[0].options["percentile"] == 0.3 -@tmpdir(relpath("test_data")) def test_everest2ropt_arbitrary_backend_options(): - config = EverestConfig.load_file( - os.path.join("mocked_test_case", "mocked_test_case.yml") - ) + config = EverestConfig.load_file(os.path.join(_CONFIG_DIR, _CONFIG_FILE)) config.optimization.backend_options = {"a": [1]} ropt_config = EnOptConfig.model_validate(everest2ropt(config)) @@ -261,8 +239,7 @@ def 
test_everest2ropt_arbitrary_backend_options():
     assert ropt_config.optimizer.options["a"] == [1]
 
 
-@tmpdir(relpath("test_data"))
-def test_everest2ropt_no_algorithm_name():
+def test_everest2ropt_no_algorithm_name(copy_test_data_to_tmp):
     config = EverestConfig.load_file(
         os.path.join("valid_config_file", "valid_yaml_config_no_algorithm.yml")
     )
diff --git a/tests/everest/test_templating.py b/tests/everest/test_templating.py
index 37eda17cad2..47117cb93ce 100644
--- a/tests/everest/test_templating.py
+++ b/tests/everest/test_templating.py
@@ -7,19 +7,15 @@
 import everest
 from everest.config import EverestConfig
-from tests.everest.utils import relpath, tmpdir
 
-TMPL_TEST_PATH = os.path.join("test_data", "templating")
 TMPL_CONFIG_FILE = "config.yml"
 TMPL_WELL_DRILL_FILE = os.path.join("templates", "well_drill_info.tmpl")
 TMPL_DUAL_INPUT_FILE = os.path.join("templates", "dual_input.tmpl")
-
 MATH_CONFIG_FILE = "config_minimal.yml"
 
 
-@tmpdir(relpath(TMPL_TEST_PATH))
-def test_render_invalid():
+def test_render_invalid(copy_template_test_data_to_tmp):
     render = everest.jobs.templating.render
 
     prod_wells = {"PROD%d" % idx: 0.3 * idx for idx in range(4)}
@@ -45,8 +41,7 @@
         render(prod_in, TMPL_WELL_DRILL_FILE, None)
 
 
-@tmpdir(relpath(TMPL_TEST_PATH))
-def test_render():
+def test_render(copy_template_test_data_to_tmp):
     render = everest.jobs.templating.render
 
     wells = {"PROD%d" % idx: 0.2 * idx for idx in range(1, 5)}
@@ -76,8 +71,7 @@
         assert expected_string == line
 
 
-@tmpdir(relpath(TMPL_TEST_PATH))
-def test_render_multiple_input():
+def test_render_multiple_input(copy_template_test_data_to_tmp):
     render = everest.jobs.templating.render
 
     wells_north = {"PROD%d" % idx: 0.2 * idx for idx in range(1, 5)}
@@ -99,8 +93,7 @@
     assert output == ["0.2 vs 0.8"]
 
 
-@tmpdir(relpath(TMPL_TEST_PATH))
-def test_render_executable():
+def test_render_executable(copy_template_test_data_to_tmp):
     assert os.access(everest.jobs.render, os.X_OK)
 
     # Dump input
@@ -132,8 +125,7 @@
 
 
 @pytest.mark.integration_test
-@tmpdir(relpath(TMPL_TEST_PATH))
-def test_install_template():
+def test_install_template(copy_template_test_data_to_tmp):
     config = EverestConfig.load_file(TMPL_CONFIG_FILE)
     workflow = everest.suite._EverestWorkflow(config)
     workflow.start_optimization()
diff --git a/tests/everest/test_ui_run.py b/tests/everest/test_ui_run.py
index 555718b4d70..4d19b3c1956 100644
--- a/tests/everest/test_ui_run.py
+++ b/tests/everest/test_ui_run.py
@@ -3,15 +3,14 @@
 
 from ieverest import IEverest
 from tests.everest.dialogs_mocker import mock_dialogs_all
-from tests.everest.utils import relpath, tmpdir
+from tests.everest.utils import relpath
 
 CASE_DIR = relpath("test_data", "mocked_test_case")
 CONFIG_FILE = "mocked_test_case.yml"
 
 
 @pytest.mark.ui_test
-@tmpdir(CASE_DIR)
-def test_load_run(qapp, qtbot, mocker):
+def test_load_run(qapp, qtbot, mocker, copy_mocked_test_data_to_tmp):
     """Load a configuration and run it from the UI"""
     qapp.setAttribute(Qt.AA_X11InitThreads)
 
diff --git a/tests/everest/test_workflows.py b/tests/everest/test_workflows.py
index 4869a1c4796..7b2f6641c57 100644
--- a/tests/everest/test_workflows.py
+++ b/tests/everest/test_workflows.py
@@ -5,15 +5,14 @@
 
 from everest.config import EverestConfig
 from everest.suite import _EverestWorkflow
-from tests.everest.utils import relpath, skipif_no_everest_models, tmpdir
+from tests.everest.utils import relpath, skipif_no_everest_models
 
 CONFIG_DIR = relpath("test_data", 
"mocked_test_case") CONFIG_FILE = "config_workflow.yml" @pytest.mark.integration_test -@tmpdir(CONFIG_DIR) -def test_workflow_run(): +def test_workflow_run(copy_mocked_test_data_to_tmp): config = EverestConfig.load_file(CONFIG_FILE) workflow = _EverestWorkflow(config) diff --git a/tests/everest/test_yaml_parser.py b/tests/everest/test_yaml_parser.py index dfe883dfc68..43ad9e23bf4 100644 --- a/tests/everest/test_yaml_parser.py +++ b/tests/everest/test_yaml_parser.py @@ -8,14 +8,14 @@ from everest import ConfigKeys from everest.config import EverestConfig from everest.simulator.everest_to_ert import everest_to_ert_config -from tests.everest.utils import MockParser, relpath, skipif_no_everest_models, tmpdir +from tests.everest.utils import MockParser, relpath, skipif_no_everest_models -snake_oil_folder = relpath("test_data", "snake_oil") +# snake_oil_folder = relpath("test_data", "snake_oil") @pytest.mark.integration_test -@tmpdir(snake_oil_folder) -def test_default_seed(): +def test_default_seed(copy_test_data_to_tmp, monkeypatch): + monkeypatch.chdir("snake_oil") config_file = os.path.join("everest/model", "snake_oil_all.yml") config = EverestConfig.load_file(config_file) assert config.environment.random_seed is None @@ -55,8 +55,8 @@ def test_read_file(): assert exp_fn == everest_config.config_file -@tmpdir(relpath("test_data", "valid_config_file")) -def test_valid_config_file(): +def test_valid_config_file(copy_test_data_to_tmp, monkeypatch): + monkeypatch.chdir("valid_config_file") # pylint: disable=unsupported-membership-test parser = MockParser() @@ -96,11 +96,11 @@ def test_valid_config_file(): assert "could not find expected ':'" in parser.get_error() -@tmpdir(relpath("test_data", "valid_config_file", "forward_models")) @pytest.mark.fails_on_macos_github_workflow @skipif_no_everest_models @pytest.mark.everest_models_test -def test_valid_forward_model_config_files(): +def test_valid_forward_model_config_files(copy_test_data_to_tmp, monkeypatch): + monkeypatch.chdir("valid_config_file/forward_models") parser = MockParser() EverestConfig.load_file_with_argparser( "valid_config_maintained_forward_models.yml", parser=parser @@ -109,11 +109,11 @@ def test_valid_forward_model_config_files(): assert parser.get_error() is None -@tmpdir(relpath("test_data", "valid_config_file", "forward_models")) @skipif_no_everest_models @pytest.mark.everest_models_test @pytest.mark.fails_on_macos_github_workflow -def test_invalid_forward_model_config_files(): +def test_invalid_forward_model_config_files(copy_test_data_to_tmp, monkeypatch): + monkeypatch.chdir("valid_config_file/forward_models") parser = MockParser() next((Path.cwd() / "input" / "templates").glob("*")).unlink() EverestConfig.load_file_with_argparser( diff --git a/tests/everest/unit/everest/bin/test_everload.py b/tests/everest/unit/everest/bin/test_everload.py index 02b4137ff6d..38c9b5da349 100644 --- a/tests/everest/unit/everest/bin/test_everload.py +++ b/tests/everest/unit/everest/bin/test_everload.py @@ -7,8 +7,6 @@ from tests.everest.utils import ( capture_streams, create_cached_mocked_test_case, - relpath, - tmpdir, ) from ert.config import ErtConfig @@ -18,7 +16,6 @@ from everest.config import EverestConfig from everest.strings import STORAGE_DIR -CONFIG_PATH = relpath("test_data", "mocked_test_case") CONFIG_FILE = "mocked_multi_batch.yml" pytestmark = pytest.mark.xdist_group(name="starts_everest") @@ -62,9 +59,10 @@ def assertBackup(config: EverestConfig): @patch("everest.bin.everload_script._internalize_batch") -@tmpdir(CONFIG_PATH) 
@pytest.mark.fails_on_macos_github_workflow -def test_everload_entry_run(mocked_internalize, cache_dir): +def test_everload_entry_run( + mocked_internalize, cache_dir, copy_mocked_test_data_to_tmp +): """Test running everload on an optimization case""" config = get_config(cache_dir) everload_entry([CONFIG_FILE, "-s"]) @@ -76,9 +74,8 @@ def test_everload_entry_run(mocked_internalize, cache_dir): @patch("everest.bin.everload_script._internalize_batch") -@tmpdir(CONFIG_PATH) @pytest.mark.fails_on_macos_github_workflow -def test_everload_entry_run_empty_batch_list(_): +def test_everload_entry_run_empty_batch_list(_, copy_mocked_test_data_to_tmp): """Test running everload on an optimization case""" with pytest.raises(SystemExit), capture_streams() as (_, err): everload_entry([CONFIG_FILE, "-s", "-b"]) @@ -89,9 +86,10 @@ def test_everload_entry_run_empty_batch_list(_): @patch("everest.bin.everload_script._internalize_batch") -@tmpdir(CONFIG_PATH) @pytest.mark.fails_on_macos_github_workflow -def test_everload_entry_missing_folders(mocked_internalize, cache_dir): +def test_everload_entry_missing_folders( + mocked_internalize, cache_dir, copy_mocked_test_data_to_tmp +): """Test running everload when output folders are missing""" config = get_config(cache_dir) shutil.rmtree(config.simulation_dir) @@ -104,9 +102,10 @@ def test_everload_entry_missing_folders(mocked_internalize, cache_dir): @patch("everest.bin.everload_script._internalize_batch") -@tmpdir(CONFIG_PATH) @pytest.mark.fails_on_macos_github_workflow -def test_everload_entry_batches(mocked_internalize, cache_dir): +def test_everload_entry_batches( + mocked_internalize, cache_dir, copy_mocked_test_data_to_tmp +): """Test running everload with a selection of batches""" config = get_config(cache_dir) # pick every second batch (assume there are at least 2) @@ -122,9 +121,10 @@ def test_everload_entry_batches(mocked_internalize, cache_dir): @patch("everest.bin.everload_script._internalize_batch") -@tmpdir(CONFIG_PATH) @pytest.mark.fails_on_macos_github_workflow -def test_everload_entry_invalid_batches(mocked_internalize): +def test_everload_entry_invalid_batches( + mocked_internalize, copy_mocked_test_data_to_tmp +): """Test running everload with no or wrong batches""" with pytest.raises(SystemExit), capture_streams() as (_, err): everload_entry([CONFIG_FILE, "-s", "-b", "-2", "5412"]) @@ -138,9 +138,10 @@ def test_everload_entry_invalid_batches(mocked_internalize): @patch("everest.bin.everload_script._internalize_batch") -@tmpdir(CONFIG_PATH) @pytest.mark.fails_on_macos_github_workflow -def test_everload_entry_overwrite(mocked_internalize, cache_dir): +def test_everload_entry_overwrite( + mocked_internalize, cache_dir, copy_mocked_test_data_to_tmp +): """Test running everload with the --overwrite flag""" config = get_config(cache_dir) everload_entry([CONFIG_FILE, "-s", "--overwrite"]) @@ -156,9 +157,10 @@ def test_everload_entry_overwrite(mocked_internalize, cache_dir): @patch("everest.bin.everload_script._internalize_batch") -@tmpdir(CONFIG_PATH) @pytest.mark.fails_on_macos_github_workflow -def test_everload_entry_not_silent(mocked_internalize, cache_dir): +def test_everload_entry_not_silent( + mocked_internalize, cache_dir, copy_mocked_test_data_to_tmp +): """Test running everload without the -s flag""" config = get_config(cache_dir) diff --git a/tests/everest/utils/__init__.py b/tests/everest/utils/__init__.py index d5b62a23b17..3e0d9dfcdb7 100644 --- a/tests/everest/utils/__init__.py +++ b/tests/everest/utils/__init__.py @@ -1,11 +1,9 @@ 
import contextlib import importlib.util -import logging import os import pathlib import shutil import sys -import tempfile from io import StringIO from unittest import mock @@ -59,60 +57,6 @@ def relpath(*path): return os.path.join(os.path.dirname(os.path.dirname(__file__)), *path) -def tmpdir(path, teardown=True): - """Decorator based on the `tmp` context""" - - def real_decorator(function): - def wrapper(function, *args, **kwargs): - with tmp(path, teardown=teardown): - return function(*args, **kwargs) - - return decorator.decorator(wrapper, function) - - return real_decorator - - -@contextlib.contextmanager -def tmp(path=None, teardown=True): - """Create and go into tmp directory, returns the path. - - This function creates a temporary directory and enters that directory. The - returned object is the path to the created directory. - - If @path is not specified, we create an empty directory, otherwise, it must - be a path to an existing directory. In that case, the directory will be - copied into the temporary directory. - - If @teardown is True (defaults to True), the directory is (attempted) - deleted after context, otherwise it is kept as is. - - """ - cwd = os.getcwd() - fname = tempfile.NamedTemporaryFile().name # noqa: SIM115 - - if path: - if not os.path.isdir(path): - logging.debug("tmp:raise no such path") - raise IOError("No such directory: %s" % path) - shutil.copytree(path, fname) - else: - # no path to copy, create empty dir - os.mkdir(fname) - - os.chdir(fname) - - yield fname # give control to caller scope - - os.chdir(cwd) - - if teardown: - try: - shutil.rmtree(fname) - except OSError as oserr: - logging.debug("tmp:rmtree failed %s (%s)" % (fname, oserr)) - shutil.rmtree(fname, ignore_errors=True) - - @contextlib.contextmanager def capture_streams(): """Context that allows capturing text sent to stdout and stderr