diff --git a/src/ert/run_models/everest_run_model.py b/src/ert/run_models/everest_run_model.py
index cb00eef4d03..d5abd42783d 100644
--- a/src/ert/run_models/everest_run_model.py
+++ b/src/ert/run_models/everest_run_model.py
@@ -37,7 +37,7 @@ from everest.strings import EVEREST
 
 from ..run_arg import RunArg, create_run_arguments
-from .base_run_model import BaseRunModel, StatusEvents
+from .base_run_model import BaseRunModel
 
 if TYPE_CHECKING:
     from ert.storage import Ensemble, Experiment
 
@@ -116,9 +116,8 @@ def __init__(
                 "add the above random seed to your configuration file."
             )
 
-        self.ropt_config = everest2ropt(everest_config)
-        self.everest_config = everest_config
-        self.support_restart = False
+        self._ropt_config = everest2ropt(everest_config)
+        self._everest_config = everest_config
 
         self._sim_callback = simulation_callback
         self._opt_callback = optimization_callback
@@ -134,19 +133,18 @@ def __init__(
             else None
         )
         self._experiment: Experiment | None = None
-        self.eval_server_cfg: EvaluatorServerConfig | None = None
-        storage = open_storage(config.ens_path, mode="w")
-        status_queue: queue.SimpleQueue[StatusEvents] = queue.SimpleQueue()
-        self.batch_id: int = 0
-        self.status: SimulationStatus | None = None
+        self._eval_server_cfg: EvaluatorServerConfig | None = None
+        self._batch_id: int = 0
+        self._status: SimulationStatus | None = None
 
         super().__init__(
             config,
-            storage,
+            open_storage(config.ens_path, mode="w"),
             config.queue_config,
-            status_queue,
+            queue.SimpleQueue(),
             active_realizations=[],  # Set dynamically in run_forward_model()
         )
+        self.support_restart = False
 
     @classmethod
     def create(
@@ -155,9 +153,7 @@ def create(
         simulation_callback: SimulationCallback | None = None,
         optimization_callback: OptimizerCallback | None = None,
     ) -> EverestRunModel:
-        def default_simulation_callback(
-            simulation_status: SimulationStatus | None,
-        ) -> str | None:
+        def default_simulation_callback(_: SimulationStatus | None) -> str | None:
             return None
 
         def default_optimization_callback() -> str | None:
@@ -168,8 +164,9 @@ def default_optimization_callback() -> str | None:
             config=ert_config,
             everest_config=ever_config,
             simulation_callback=simulation_callback or default_simulation_callback,
-            optimization_callback=optimization_callback
-            or default_optimization_callback,
+            optimization_callback=(
+                optimization_callback or default_optimization_callback
+            ),
         )
 
     @classmethod
@@ -189,7 +186,7 @@ def result(self) -> OptimalResult | None:
         return self._result
 
     def __repr__(self) -> str:
-        config_json = json.dumps(self.everest_config, sort_keys=True, indent=2)
+        config_json = json.dumps(self._everest_config, sort_keys=True, indent=2)
         return f"EverestRunModel(config={config_json})"
 
     def run_experiment(
@@ -197,7 +194,7 @@ def run_experiment(
     ) -> None:
         self.log_at_startup()
         self.restart = restart
-        self.eval_server_cfg = evaluator_server_config
+        self._eval_server_cfg = evaluator_server_config
         self._experiment = self._storage.create_experiment(
             name=f"EnOpt@{datetime.datetime.now().strftime('%Y-%m-%d@%H:%M:%S')}",
             parameters=self.ert_config.ensemble_config.parameter_configuration,
@@ -214,7 +211,7 @@ def run_experiment(
         # This mechanism is outdated and not supported by the ropt package. It
         # is retained for now via the seba_sqlite package.
         seba_storage = SqliteStorage(  # type: ignore
-            optimizer, self.everest_config.optimization_output_dir
+            optimizer, self._everest_config.optimization_output_dir
         )
 
         # Run the optimization:
@@ -284,10 +281,10 @@ def _create_optimizer(self) -> BasicOptimizer:
         # simplifying code that reads them as fixed width tables. `maximize` is
         # set because ropt reports minimization results, while everest wants
         # maximization results, necessitating a conversion step.
-        ropt_output_folder = Path(self.everest_config.optimization_output_dir)
+        ropt_output_folder = Path(self._everest_config.optimization_output_dir)
         optimizer = (
             BasicOptimizer(
-                enopt_config=self.ropt_config, evaluator=self._forward_model_evaluator
+                enopt_config=self._ropt_config, evaluator=self._forward_model_evaluator
             )
             .add_table(
                 columns=RESULT_COLUMNS,
@@ -347,9 +344,9 @@ def _on_before_forward_model_evaluation(
         logging.getLogger(EVEREST).debug("Optimization callback called")
 
         if (
-            self.everest_config.optimization is not None
-            and self.everest_config.optimization.max_batch_num is not None
-            and (self.batch_id >= self.everest_config.optimization.max_batch_num)
+            self._everest_config.optimization is not None
+            and self._everest_config.optimization.max_batch_num is not None
+            and (self._batch_id >= self._everest_config.optimization.max_batch_num)
         ):
             self._exit_code = EverestExitCode.MAX_BATCH_NUM_REACHED
             logging.getLogger(EVEREST).info("Maximum number of batches reached")
@@ -365,7 +362,7 @@ def _forward_model_evaluator(
         self, control_values: NDArray[np.float64], evaluator_context: EvaluatorContext
     ) -> EvaluatorResult:
         # Reset the current run status:
-        self.status = None
+        self._status = None
 
         # Get any cached_results results that may be useful:
         cached_results = self._get_cached_results(control_values, evaluator_context)
@@ -378,7 +375,7 @@ def _forward_model_evaluator(
         # Initialize a new experiment in storage:
         assert self._experiment
         ensemble = self._experiment.create_ensemble(
-            name=f"batch_{self.batch_id}",
+            name=f"batch_{self._batch_id}",
             ensemble_size=len(case_data),
         )
         for sim_id, controls in enumerate(case_data.values()):
@@ -393,8 +390,8 @@ def _forward_model_evaluator(
                 "_ERT_SIMULATION_MODE": "batch_simulation",
             }
         )
-        assert self.eval_server_cfg
-        self._evaluate_and_postprocess(run_args, ensemble, self.eval_server_cfg)
+        assert self._eval_server_cfg
+        self._evaluate_and_postprocess(run_args, ensemble, self._eval_server_cfg)
 
         # If necessary, delete the run path:
         self._delete_runpath(run_args)
@@ -406,7 +403,7 @@ def _forward_model_evaluator(
         )
 
         # Increase the batch ID for the next evaluation:
-        self.batch_id += 1
+        self._batch_id += 1
 
         # Add the results from the evaluations to the cache:
         self._add_results_to_cache(
@@ -526,7 +523,7 @@ def _check_suffix(
                         f"Key {key} has suffixes, a suffix must be specified"
                     )
 
-        if set(controls.keys()) != set(self.everest_config.control_names):
+        if set(controls.keys()) != set(self._everest_config.control_names):
             err_msg = "Mismatch between initialized and provided control names."
             raise KeyError(err_msg)
 
@@ -562,11 +559,10 @@ def _slug(entity: str) -> str:
         self.active_realizations = [True] * len(case_data)
         assert evaluator_context.config.realizations.names is not None
         for sim_id, control_idx in enumerate(case_data.keys()):
-            if self.active_realizations[sim_id]:
-                realization = evaluator_context.realizations[control_idx]
-                substitutions[f""] = str(
-                    evaluator_context.config.realizations.names[realization]
-                )
+            realization = evaluator_context.realizations[control_idx]
+            substitutions[f""] = str(
+                evaluator_context.config.realizations.names[realization]
+            )
 
         run_paths = Runpaths(
             jobname_format=self.ert_config.model_config.jobname_format_string,
@@ -584,8 +580,8 @@ def _slug(entity: str) -> str:
     def _delete_runpath(self, run_args: list[RunArg]) -> None:
         logging.getLogger(EVEREST).debug("Simulation callback called")
         if (
-            self.everest_config.simulator is not None
-            and self.everest_config.simulator.delete_run_path
+            self._everest_config.simulator is not None
+            and self._everest_config.simulator.delete_run_path
         ):
             for i, real in self.get_current_snapshot().reals.items():
                 path_to_delete = run_args[int(i)].runpath
@@ -614,11 +610,11 @@ def _gather_results(
                 results.append({})
                 continue
             d = {}
-            for key in self.everest_config.result_names:
+            for key in self._everest_config.result_names:
                 data = ensemble.load_responses(key, (sim_id,))
                 d[key] = data["values"].to_numpy()
             results.append(d)
-        for fnc_name, alias in self.everest_config.function_aliases.items():
+        for fnc_name, alias in self._everest_config.function_aliases.items():
             for result in results:
                 result[fnc_name] = result[alias]
         return results
@@ -663,7 +659,7 @@ def _get_evaluator_result(
         return EvaluatorResult(
             objectives=objectives,
             constraints=constraints,
-            batch_id=self.batch_id,
+            batch_id=self._batch_id,
             evaluation_ids=sim_ids,
         )
 
@@ -688,18 +684,18 @@ def _add_results_to_cache(
 
     def check_if_runpath_exists(self) -> bool:
         return (
-            self.everest_config.simulation_dir is not None
-            and os.path.exists(self.everest_config.simulation_dir)
-            and any(os.listdir(self.everest_config.simulation_dir))
+            self._everest_config.simulation_dir is not None
+            and os.path.exists(self._everest_config.simulation_dir)
+            and any(os.listdir(self._everest_config.simulation_dir))
         )
 
     def send_snapshot_event(self, event: Event, iteration: int) -> None:
         super().send_snapshot_event(event, iteration)
         if type(event) in (EESnapshot, EESnapshotUpdate):
             newstatus = self._simulation_status(self.get_current_snapshot())
-            if self.status != newstatus:  # No change in status
+            if self._status != newstatus:  # No change in status
                 self._sim_callback(newstatus)
-                self.status = newstatus
+                self._status = newstatus
 
     def _simulation_status(self, snapshot: EnsembleSnapshot) -> SimulationStatus:
         jobs_progress: list[list[JobProgress]] = []
@@ -724,7 +720,7 @@ def _simulation_status(self, snapshot: EnsembleSnapshot) -> SimulationStatus:
                 )
                 if fm_step.get("error", ""):
                     self._handle_errors(
-                        batch=self.batch_id,
+                        batch=self._batch_id,
                         simulation=simulation,
                         realization=realization,
                         fm_name=fm_step.get("name", "Unknown"),  # type: ignore
@@ -735,7 +731,7 @@ def _simulation_status(self, snapshot: EnsembleSnapshot) -> SimulationStatus:
         return {
             "status": self.get_current_status(),
             "progress": jobs_progress,
-            "batch_number": self.batch_id,
+            "batch_number": self._batch_id,
         }
 
     def _handle_errors(
diff --git a/tests/everest/test_everest_output.py b/tests/everest/test_everest_output.py
index 5287251aca4..78ee7e439b8 100644
--- a/tests/everest/test_everest_output.py
+++ b/tests/everest/test_everest_output.py
@@ -52,13 +52,7 @@ async def test_everest_output(copy_mocked_test_data_to_tmp):
     initial_folders = set(folders)
     initial_files = set(files)
 
-    # Tests in this class used to fail when a callback was passed in
-    # Use a callback just to see that everything works fine, even though
-    # the callback does nothing
-    def useless_cb(*args, **kwargs):
-        pass
-
-    EverestRunModel.create(config, optimization_callback=useless_cb)
+    EverestRunModel.create(config)
 
     # Check the output folder is created when stating the optimization
     # in everest workflow
diff --git a/tests/everest/test_everserver.py b/tests/everest/test_everserver.py
index 7dc110d8c25..84224de944e 100644
--- a/tests/everest/test_everserver.py
+++ b/tests/everest/test_everserver.py
@@ -121,7 +121,7 @@ def test_everserver_status_failure(_1, copy_math_func_test_data_to_tmp):
         "ert.run_models.everest_run_model.EverestRunModel.run_experiment",
         autospec=True,
         side_effect=lambda self, evaluator_server_config, restart=False: check_status(
-            ServerConfig.get_hostfile_path(self.everest_config.output_dir),
+            ServerConfig.get_hostfile_path(self._everest_config.output_dir),
             status=ServerStatus.running,
         ),
     )
diff --git a/tests/everest/test_simulator_cache.py b/tests/everest/test_simulator_cache.py
index e347fc76476..02f9ae4d4ff 100644
--- a/tests/everest/test_simulator_cache.py
+++ b/tests/everest/test_simulator_cache.py
@@ -47,7 +47,7 @@ def new_call(*args):
     Path("everest_output/optimization_output/seba.db").unlink()
 
     # The batch_id was used as a stopping criterion, so it must be reset:
-    run_model.batch_id = 0
+    run_model._batch_id = 0
     run_model.run_experiment(evaluator_server_config)
 
     assert n_evals == 0