Skip to content

Commit

Permalink
EverestRunModel: minor refactoring
Browse files Browse the repository at this point in the history
  • Loading branch information
verveerpj committed Dec 18, 2024
1 parent 891f8ca commit 3e4ae0c
Showing 1 changed file with 53 additions and 63 deletions.
116 changes: 53 additions & 63 deletions src/ert/run_models/everest_run_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,62 +41,9 @@
from .base_run_model import BaseRunModel, StatusEvents

if TYPE_CHECKING:
import numpy.typing as npt

from ert.storage import Ensemble, Experiment


# A number of settings for the table reporters:

# Identifier columns common to every reporter table.
_ID_COLUMNS = {
    "result_id": "ID",
    "batch_id": "Batch",
}

# Maps ropt result field paths to the column headers used in each table.
RESULT_COLUMNS = {
    **_ID_COLUMNS,
    "functions.weighted_objective": "Total-Objective",
    "linear_constraints.violations": "IC-violation",
    "nonlinear_constraints.violations": "OC-violation",
    "functions.objectives": "Objective",
    "functions.constraints": "Constraint",
    "evaluations.variables": "Control",
    "linear_constraints.values": "IC-diff",
    "nonlinear_constraints.values": "OC-diff",
    "functions.scaled_objectives": "Scaled-Objective",
    "functions.scaled_constraints": "Scaled-Constraint",
    "evaluations.scaled_variables": "Scaled-Control",
    "nonlinear_constraints.scaled_values": "Scaled-OC-diff",
    "nonlinear_constraints.scaled_violations": "Scaled-OC-violation",
}
GRADIENT_COLUMNS = {
    **_ID_COLUMNS,
    "gradients.weighted_objective": "Total-Gradient",
    "gradients.objectives": "Grad-objective",
    "gradients.constraints": "Grad-constraint",
}
SIMULATION_COLUMNS = {
    **_ID_COLUMNS,
    "realization": "Realization",
    "evaluations.evaluation_ids": "Simulation",
    "evaluations.variables": "Control",
    "evaluations.objectives": "Objective",
    "evaluations.constraints": "Constraint",
    "evaluations.scaled_variables": "Scaled-Control",
    "evaluations.scaled_objectives": "Scaled-Objective",
    "evaluations.scaled_constraints": "Scaled-Constraint",
}
PERTURBATIONS_COLUMNS = {
    **_ID_COLUMNS,
    "realization": "Realization",
    "evaluations.perturbed_evaluation_ids": "Simulation",
    "evaluations.perturbed_variables": "Control",
    "evaluations.perturbed_objectives": "Objective",
    "evaluations.perturbed_constraints": "Constraint",
    "evaluations.scaled_perturbed_variables": "Scaled-Control",
    "evaluations.scaled_perturbed_objectives": "Scaled-Objective",
    "evaluations.scaled_perturbed_constraints": "Scaled-Constraint",
}
# Minimum header line count, so all tables can be read as fixed-width text.
MIN_HEADER_LEN = 3

logger = logging.getLogger(__name__)


Expand Down Expand Up @@ -220,7 +167,6 @@ def run_experiment(
self, evaluator_server_config: EvaluatorServerConfig, restart: bool = False
) -> None:
self.log_at_startup()
self.restart = restart
self._eval_server_cfg = evaluator_server_config
self._experiment = self._storage.create_experiment(
name=f"EnOpt@{datetime.datetime.now().strftime('%Y-%m-%d@%H:%M:%S')}",
Expand Down Expand Up @@ -343,18 +289,62 @@ def _on_before_forward_model_evaluation(
optimizer.abort_optimization()

def _create_optimizer(self) -> BasicOptimizer:
assert (
self._everest_config.environment is not None
and self._everest_config.environment is not None
)

ropt_output_folder = Path(self._everest_config.optimization_output_dir)
RESULT_COLUMNS = {
"result_id": "ID",
"batch_id": "Batch",
"functions.weighted_objective": "Total-Objective",
"linear_constraints.violations": "IC-violation",
"nonlinear_constraints.violations": "OC-violation",
"functions.objectives": "Objective",
"functions.constraints": "Constraint",
"evaluations.variables": "Control",
"linear_constraints.values": "IC-diff",
"nonlinear_constraints.values": "OC-diff",
"functions.scaled_objectives": "Scaled-Objective",
"functions.scaled_constraints": "Scaled-Constraint",
"evaluations.scaled_variables": "Scaled-Control",
"nonlinear_constraints.scaled_values": "Scaled-OC-diff",
"nonlinear_constraints.scaled_violations": "Scaled-OC-violation",
}
GRADIENT_COLUMNS = {
"result_id": "ID",
"batch_id": "Batch",
"gradients.weighted_objective": "Total-Gradient",
"gradients.objectives": "Grad-objective",
"gradients.constraints": "Grad-constraint",
}
SIMULATION_COLUMNS = {
"result_id": "ID",
"batch_id": "Batch",
"realization": "Realization",
"evaluations.evaluation_ids": "Simulation",
"evaluations.variables": "Control",
"evaluations.objectives": "Objective",
"evaluations.constraints": "Constraint",
"evaluations.scaled_variables": "Scaled-Control",
"evaluations.scaled_objectives": "Scaled-Objective",
"evaluations.scaled_constraints": "Scaled-Constraint",
}
PERTURBATIONS_COLUMNS = {
"result_id": "ID",
"batch_id": "Batch",
"realization": "Realization",
"evaluations.perturbed_evaluation_ids": "Simulation",
"evaluations.perturbed_variables": "Control",
"evaluations.perturbed_objectives": "Objective",
"evaluations.perturbed_constraints": "Constraint",
"evaluations.scaled_perturbed_variables": "Scaled-Control",
"evaluations.scaled_perturbed_objectives": "Scaled-Objective",
"evaluations.scaled_perturbed_constraints": "Scaled-Constraint",
}
MIN_HEADER_LEN = 3

# Initialize the optimizer with output tables. `min_header_len` is set
# to ensure that all tables have the same number of header lines,
# simplifying code that reads them as fixed width tables. `maximize` is
# set because ropt reports minimization results, while everest wants
# maximization results, necessitating a conversion step.
ropt_output_folder = Path(self._everest_config.optimization_output_dir)
optimizer = (
BasicOptimizer(
enopt_config=self._ropt_config, evaluator=self._forward_model_evaluator
Expand Down Expand Up @@ -451,7 +441,7 @@ def _get_active_results(
)
return values

def init_case_data(
def _init_case_data(
self,
control_values: NDArray[np.float64],
metadata: EvaluatorContext,
Expand Down Expand Up @@ -558,7 +548,7 @@ def _slug(entity: str) -> str:
metadata.config.realizations.names[realization]
for realization in metadata.realizations
]
case_data, active, cached = self.init_case_data(
case_data, active, cached = self._init_case_data(
control_values=control_values,
metadata=metadata,
realization_ids=realization_ids,
Expand Down Expand Up @@ -605,7 +595,7 @@ def _slug(entity: str) -> str:

self._delete_runpath(run_args)
# gather results
results: list[dict[str, npt.NDArray[np.float64]]] = []
results: list[dict[str, NDArray[np.float64]]] = []
for sim_id, successful in enumerate(self.active_realizations):
if not successful:
logger.error(f"Simulation {sim_id} failed.")
Expand Down

0 comments on commit 3e4ae0c

Please sign in to comment.