Move results processing into solve_internal_problem.
janosg committed Nov 30, 2024
1 parent 8596bed commit c1f0d2c
Showing 5 changed files with 111 additions and 116 deletions.
72 changes: 66 additions & 6 deletions src/optimagic/optimization/algorithm.py
@@ -10,12 +10,16 @@
 
 from optimagic.exceptions import InvalidAlgoInfoError, InvalidAlgoOptionError
 from optimagic.logging.types import StepStatus
+from optimagic.optimization.convergence_report import get_convergence_report
 from optimagic.optimization.history import History
 from optimagic.optimization.internal_optimization_problem import (
     InternalOptimizationProblem,
 )
+from optimagic.optimization.optimize_result import OptimizeResult
+from optimagic.parameters.conversion import Converter
 from optimagic.type_conversion import TYPE_CONVERTERS
-from optimagic.typing import AggregationLevel
+from optimagic.typing import AggregationLevel, Direction, ExtraResultFields
+from optimagic.utilities import isscalar
 
 
 @dataclass(frozen=True)
@@ -142,6 +146,56 @@ def __post_init__(self) -> None:
             )
             raise TypeError(msg)
 
+    def create_optimize_result(
+        self,
+        converter: Converter,
+        solver_type: AggregationLevel,
+        extra_fields: ExtraResultFields,
+    ) -> OptimizeResult:
+        """Process an internal optimizer result."""
+        params = converter.params_from_internal(self.x)
+        if isscalar(self.fun):
+            fun = float(self.fun)
+        elif solver_type == AggregationLevel.LIKELIHOOD:
+            fun = float(np.sum(self.fun))
+        elif solver_type == AggregationLevel.LEAST_SQUARES:
+            fun = np.dot(self.fun, self.fun)
+
+        if extra_fields.direction == Direction.MAXIMIZE:
+            fun = -fun
+
+        if self.history is not None:
+            conv_report = get_convergence_report(
+                history=self.history, direction=extra_fields.direction
+            )
+        else:
+            conv_report = None
+
+        out = OptimizeResult(
+            params=params,
+            fun=fun,
+            start_fun=extra_fields.start_fun,
+            start_params=extra_fields.start_params,
+            algorithm=extra_fields.algorithm,
+            direction=extra_fields.direction.value,
+            n_free=extra_fields.n_free,
+            message=self.message,
+            success=self.success,
+            n_fun_evals=self.n_fun_evals,
+            n_jac_evals=self.n_jac_evals,
+            n_hess_evals=self.n_hess_evals,
+            n_iterations=self.n_iterations,
+            status=self.status,
+            jac=self.jac,
+            hess=self.hess,
+            hess_inv=self.hess_inv,
+            max_constraint_violation=self.max_constraint_violation,
+            history=self.history,
+            algorithm_output=self.info,
+            convergence_report=conv_report,
+        )
+        return out
+
 
 class AlgorithmMeta(ABCMeta):
     """Metaclass to get repr, algo_info and name for classes, not just instances."""
@@ -234,25 +288,31 @@ def solve_internal_problem(
         problem: InternalOptimizationProblem,
         x0: NDArray[np.float64],
         step_id: int,
-    ) -> InternalOptimizeResult:
+    ) -> OptimizeResult:
         problem = problem.with_new_history().with_step_id(step_id)
 
         if problem.logger:
             problem.logger.step_store.update(
                 step_id, {"status": str(StepStatus.RUNNING.value)}
             )
 
-        result = self._solve_internal_problem(problem, x0)
+        raw_res = self._solve_internal_problem(problem, x0)
 
-        if (not self.algo_info.disable_history) and (result.history is None):
-            result = replace(result, history=problem.history)
+        if (not self.algo_info.disable_history) and (raw_res.history is None):
+            raw_res = replace(raw_res, history=problem.history)
 
         if problem.logger:
             problem.logger.step_store.update(
                 step_id, {"status": str(StepStatus.COMPLETE.value)}
             )
 
-        return result
+        res = raw_res.create_optimize_result(
+            converter=problem.converter,
+            solver_type=self.algo_info.solver_type,
+            extra_fields=problem.static_result_fields,
+        )
+
+        return res
 
     def with_option_if_applicable(self, **kwargs: Any) -> Self:
         """Call with_option only with applicable keyword arguments."""
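Note on the new create_optimize_result method above: it collapses the internal criterion value to the single scalar reported on the OptimizeResult. Scalar output passes through, likelihood contributions are summed, least-squares residuals become a sum of squares, and the sign is flipped back for maximization problems. Below is a minimal standalone sketch of that aggregation logic; the two enums and the residual vector are illustrative stand-ins rather than optimagic's real types, and np.isscalar stands in for optimagic.utilities.isscalar.

# Standalone sketch of the scalar-aggregation logic from create_optimize_result.
from enum import Enum, auto

import numpy as np


class AggregationLevel(Enum):  # stand-in for optimagic.typing.AggregationLevel
    SCALAR = auto()
    LIKELIHOOD = auto()
    LEAST_SQUARES = auto()


class Direction(Enum):  # stand-in for optimagic.typing.Direction
    MINIMIZE = "minimize"
    MAXIMIZE = "maximize"


def aggregate_fun(raw_fun, solver_type, direction):
    """Collapse an internal criterion value to the scalar reported to users."""
    if np.isscalar(raw_fun):
        fun = float(raw_fun)
    elif solver_type == AggregationLevel.LIKELIHOOD:
        # likelihood optimizers return per-observation contributions
        fun = float(np.sum(raw_fun))
    elif solver_type == AggregationLevel.LEAST_SQUARES:
        # least-squares optimizers return residuals; report the sum of squares
        fun = float(np.dot(raw_fun, raw_fun))
    else:
        raise ValueError(f"Unknown solver type: {solver_type}")
    if direction == Direction.MAXIMIZE:
        # the internal problem is always a minimization, so flip the sign back
        fun = -fun
    return fun


residuals = np.array([0.5, -1.0, 0.25])  # made-up residual vector
print(aggregate_fun(residuals, AggregationLevel.LEAST_SQUARES, Direction.MINIMIZE))
# 1.3125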
25 changes: 19 additions & 6 deletions src/optimagic/optimization/multistart.py
@@ -12,7 +12,6 @@
 """
 
 import warnings
-from dataclasses import replace
 from typing import Literal
 
 import numpy as np
@@ -21,7 +20,7 @@
 
 from optimagic.logging.logger import LogStore
 from optimagic.logging.types import StepStatus
-from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
+from optimagic.optimization.algorithm import Algorithm
 from optimagic.optimization.internal_optimization_problem import (
     InternalBounds,
     InternalOptimizationProblem,
@@ -30,6 +29,8 @@
 from optimagic.optimization.optimization_logging import (
     log_scheduled_steps_and_get_ids,
 )
+from optimagic.optimization.optimize_result import OptimizeResult
+from optimagic.optimization.process_results import process_multistart_result
 from optimagic.typing import AggregationLevel, ErrorHandling
 from optimagic.utilities import get_rng
 
@@ -42,7 +43,7 @@ def run_multistart_optimization(
     options: InternalMultistartOptions,
     logger: LogStore | None,
     error_handling: ErrorHandling,
-) -> InternalOptimizeResult:
+) -> OptimizeResult:
     steps = determine_steps(options.n_samples, stopping_maxopt=options.stopping_maxopt)
 
     scheduled_steps = log_scheduled_steps_and_get_ids(
@@ -159,6 +160,7 @@ def single_optimization(x0, step_id):
             results=batch_results,
             convergence_criteria=convergence_criteria,
             solver_type=local_algorithm.algo_info.solver_type,
+            converter=internal_problem.converter,
         )
         opt_counter += len(batch)
         if is_converged:
@@ -176,7 +178,12 @@
     }
 
     raw_res = state["best_res"]
-    res = replace(raw_res, multistart_info=multistart_info)
+    res = process_multistart_result(
+        raw_res=raw_res,
+        converter=internal_problem.converter,
+        extra_fields=internal_problem.static_result_fields,
+        multistart_info=multistart_info,
+    )
 
     return res
 
@@ -371,7 +378,12 @@ def get_batched_optimization_sample(sorted_sample, stopping_maxopt, batch_size):
 
 
 def update_convergence_state(
-    current_state, starts, results, convergence_criteria, solver_type
+    current_state,
+    starts,
+    results,
+    convergence_criteria,
+    solver_type,
+    converter,
 ):
     """Update the state of all quantities related to convergence.
@@ -389,6 +401,7 @@
         convergence_criteria (dict): Dict with the entries "xtol" and "max_discoveries"
         solver_type: The aggregation level of the local optimizer. Needed to
             interpret the output of the internal criterion function.
+        converter: The converter to map between internal and external parameter spaces.
 
     Returns:
Expand Down Expand Up @@ -422,7 +435,7 @@ def update_convergence_state(
# ==================================================================================
valid_results = [results[i] for i in valid_indices]
valid_starts = [starts[i] for i in valid_indices]
valid_new_x = [res.x for res in valid_results]
valid_new_x = [converter.params_to_internal(res.params) for res in valid_results]
valid_new_y = []

# make the criterion output scalar if a least squares optimizer returns an
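The converter argument threaded into update_convergence_state above exists because local results now arrive as processed OptimizeResults whose params live in the external, user-facing space, while convergence distances are measured on internal parameter vectors. A toy round trip, with a hypothetical scaling converter standing in for optimagic's real Converter:

# Toy illustration of why update_convergence_state now needs a converter:
# results carry external params, while convergence is measured internally.
from dataclasses import dataclass

import numpy as np


@dataclass
class ScalingConverter:
    """Hypothetical converter; optimagic's real Converter is more general."""

    factor: float

    def params_to_internal(self, params):
        return np.asarray(params) / self.factor

    def params_from_internal(self, x):
        return x * self.factor


@dataclass
class FakeResult:
    params: np.ndarray  # external representation, as on OptimizeResult


converter = ScalingConverter(factor=10.0)
results = [FakeResult(params=np.array([10.0, 20.0]))]

# mirrors: valid_new_x = [converter.params_to_internal(res.params) for res in results]
valid_new_x = [converter.params_to_internal(res.params) for res in results]
print(valid_new_x)  # [array([1., 2.])], i.e. back in internal space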
23 changes: 2 additions & 21 deletions src/optimagic/optimization/optimize.py
@@ -48,10 +48,6 @@
 )
 from optimagic.optimization.optimization_logging import log_scheduled_steps_and_get_ids
 from optimagic.optimization.optimize_result import OptimizeResult
-from optimagic.optimization.process_results import (
-    process_multistart_result,
-    process_single_result,
-)
 from optimagic.parameters.bounds import Bounds
 from optimagic.parameters.conversion import (
     get_converter,
@@ -644,7 +640,7 @@ def _optimize(problem: OptimizationProblem) -> OptimizeResult:
             logger=logger,
         )[0]
 
-        raw_res = problem.algorithm.solve_internal_problem(internal_problem, x, step_id)
+        res = problem.algorithm.solve_internal_problem(internal_problem, x, step_id)
 
     else:
         multistart_options = get_internal_multistart_options_from_public(
@@ -658,7 +654,7 @@
             upper=internal_params.soft_upper_bounds,
         )
 
-        raw_res = run_multistart_optimization(
+        res = run_multistart_optimization(
             local_algorithm=problem.algorithm,
             internal_problem=internal_problem,
             x=x,
@@ -672,21 +668,6 @@ def _optimize(problem: OptimizationProblem) -> OptimizeResult:
     # Process the result
     # ==================================================================================
 
-    if problem.multistart is None:
-        res = process_single_result(
-            raw_res=raw_res,
-            converter=converter,
-            solver_type=problem.algorithm.algo_info.solver_type,
-            extra_fields=extra_fields,
-        )
-    else:
-        res = process_multistart_result(
-            raw_res=raw_res,
-            converter=converter,
-            solver_type=problem.algorithm.algo_info.solver_type,
-            extra_fields=extra_fields,
-        )
-
     log_reader: LogReader[Any] | None
     if logger is not None:
         assert problem.logging is not None
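With both branches of _optimize now returning a finished OptimizeResult, the removed post-processing block above becomes unnecessary. The runnable toy below mirrors that simplified dispatch; every class and function in it is a stand-in for illustration, not optimagic's API:

# Runnable toy version of the simplified dispatch in _optimize: both branches
# return a finished result object, so no post-processing step remains.
from dataclasses import dataclass


@dataclass
class ToyResult:
    fun: float
    params: list


class ToyAlgorithm:
    def solve_internal_problem(self, internal_problem, x, step_id):
        # stands in for Algorithm.solve_internal_problem, which now returns
        # a processed result instead of a raw internal one
        return ToyResult(fun=sum(v**2 for v in x), params=x)


def toy_multistart(algorithm, xs):
    # stands in for run_multistart_optimization, which likewise returns a
    # processed result (here: the best over all starts)
    candidates = [algorithm.solve_internal_problem(None, x, i) for i, x in enumerate(xs)]
    return min(candidates, key=lambda r: r.fun)


def optimize(algorithm, x, multistart_xs=None):
    if multistart_xs is None:
        res = algorithm.solve_internal_problem(None, x, step_id=0)
    else:
        res = toy_multistart(algorithm, multistart_xs)
    return res  # no separate "process the result" branch needed anymore


print(optimize(ToyAlgorithm(), [1.0, 2.0]).fun)            # 5.0
print(optimize(ToyAlgorithm(), None, [[3.0], [0.5]]).fun)  # 0.25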
89 changes: 9 additions & 80 deletions src/optimagic/optimization/process_results.py
@@ -1,91 +1,29 @@
-from dataclasses import replace
+import copy
 from typing import Any
 
 import numpy as np
 
-from optimagic.optimization.algorithm import InternalOptimizeResult
-from optimagic.optimization.convergence_report import get_convergence_report
 from optimagic.optimization.optimize_result import MultistartInfo, OptimizeResult
 from optimagic.parameters.conversion import Converter
-from optimagic.typing import AggregationLevel, Direction, ExtraResultFields
-from optimagic.utilities import isscalar
-
-
-def process_single_result(
-    raw_res: InternalOptimizeResult,
-    converter: Converter,
-    solver_type: AggregationLevel,
-    extra_fields: ExtraResultFields,
-) -> OptimizeResult:
-    """Process an internal optimizer result."""
-    params = converter.params_from_internal(raw_res.x)
-    if isscalar(raw_res.fun):
-        fun = float(raw_res.fun)
-    elif solver_type == AggregationLevel.LIKELIHOOD:
-        fun = float(np.sum(raw_res.fun))
-    elif solver_type == AggregationLevel.LEAST_SQUARES:
-        fun = np.dot(raw_res.fun, raw_res.fun)
-
-    if extra_fields.direction == Direction.MAXIMIZE:
-        fun = -fun
-
-    if raw_res.history is not None:
-        conv_report = get_convergence_report(
-            history=raw_res.history, direction=extra_fields.direction
-        )
-    else:
-        conv_report = None
-
-    out = OptimizeResult(
-        params=params,
-        fun=fun,
-        start_fun=extra_fields.start_fun,
-        start_params=extra_fields.start_params,
-        algorithm=extra_fields.algorithm,
-        direction=extra_fields.direction.value,
-        n_free=extra_fields.n_free,
-        message=raw_res.message,
-        success=raw_res.success,
-        n_fun_evals=raw_res.n_fun_evals,
-        n_jac_evals=raw_res.n_jac_evals,
-        n_hess_evals=raw_res.n_hess_evals,
-        n_iterations=raw_res.n_iterations,
-        status=raw_res.status,
-        jac=raw_res.jac,
-        hess=raw_res.hess,
-        hess_inv=raw_res.hess_inv,
-        max_constraint_violation=raw_res.max_constraint_violation,
-        history=raw_res.history,
-        algorithm_output=raw_res.info,
-        convergence_report=conv_report,
-    )
-    return out
+from optimagic.typing import Direction, ExtraResultFields
 
 
 def process_multistart_result(
-    raw_res: InternalOptimizeResult,
+    raw_res: OptimizeResult,
     converter: Converter,
-    solver_type: AggregationLevel,
     extra_fields: ExtraResultFields,
+    multistart_info: dict[str, Any],
 ) -> OptimizeResult:
     """Process results of internal optimizers."""
-    if raw_res.multistart_info is None:
-        raise ValueError("Multistart info is missing.")
-
-    if isinstance(raw_res, str):
-        res = _dummy_result_from_traceback(raw_res, extra_fields)
-    else:
-        res = process_single_result(
-            raw_res=raw_res,
-            converter=converter,
-            solver_type=solver_type,
-            extra_fields=extra_fields,
-        )
-
+    res = raw_res
     info = _process_multistart_info(
-        raw_res.multistart_info,
+        multistart_info,
         converter=converter,
-        solver_type=solver_type,
         extra_fields=extra_fields,
     )
 
@@ -118,24 +56,15 @@ def process_multistart_result(
 def _process_multistart_info(
     info: dict[str, Any],
     converter: Converter,
-    solver_type: AggregationLevel,
     extra_fields: ExtraResultFields,
 ) -> MultistartInfo:
     starts = [converter.params_from_internal(x) for x in info["start_parameters"]]
 
     optima = []
     for res, start in zip(info["local_optima"], starts, strict=False):
-        replacements = {
-            "start_params": start,
-            "start_fun": None,
-        }
-
-        processed = process_single_result(
-            res,
-            converter=converter,
-            solver_type=solver_type,
-            extra_fields=replace(extra_fields, **replacements),
-        )
+        processed = copy.copy(res)
+        processed.start_params = start
+        processed.start_fun = None
         optima.append(processed)
 
     sample = [converter.params_from_internal(x) for x in info["exploration_sample"]]
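The loop in _process_multistart_info shows the payoff of the refactoring: each entry of info["local_optima"] is already a processed OptimizeResult, so instead of re-running process_single_result the code shallow-copies the result and patches only the start fields. A toy version of that copy-and-patch pattern follows; LocalResult is a stand-in, and the sketch assumes, as the attribute assignments in the diff imply, that the real result class is a mutable dataclass:

# Toy version of the new copy-and-patch pattern in _process_multistart_info.
import copy
from dataclasses import dataclass


@dataclass
class LocalResult:  # stand-in for optimagic's OptimizeResult
    params: list
    fun: float
    start_params: list | None = None
    start_fun: float | None = None


local_optima = [LocalResult(params=[1.0], fun=0.1), LocalResult(params=[2.0], fun=0.3)]
starts = [[5.0], [-5.0]]  # made-up start parameters in external space

optima = []
for res, start in zip(local_optima, starts, strict=False):
    processed = copy.copy(res)      # shallow copy keeps all processed fields
    processed.start_params = start  # only the start information is overridden
    processed.start_fun = None      # start_fun is unknown for local optima
    optima.append(processed)

print(optima[0].start_params, local_optima[0].start_params)  # [5.0] None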