diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 5597c17..d25d0bb 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -22,6 +22,7 @@ jobs: - name: Install dependencies run: | python -m pip install --upgrade pip + python -m pip install coverage if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - name: Test with coverage run: | diff --git a/.prospector.yaml b/.prospector.yaml new file mode 100644 index 0000000..38b6c68 --- /dev/null +++ b/.prospector.yaml @@ -0,0 +1,31 @@ +strictness: high + +ignore-paths: + - docs + - venv + - x.py + +pep257: + run: true + disable: + - D203 + - D213 + +pyflakes: + disable: + - F821 + - F722 + +pep8: + disable: + - E721 + +pylint: + disable: + - too-many-locals + - too-many-arguments + - stop-iteration-return + - unidiomatic-typecheck + - invalid-name + - attribute-defined-outside-init + - too-many-instance-attributes \ No newline at end of file diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..cb1c589 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,3 @@ +Sphinx==3.3.0 +sphinx-automodapi==0.13 +sphinx-rtd-theme==0.5.0 \ No newline at end of file diff --git a/modcma/__init__.py b/modcma/__init__.py index e69de29..6d67363 100644 --- a/modcma/__init__.py +++ b/modcma/__init__.py @@ -0,0 +1,35 @@ +"""Entrypoint of Modular CMA-ES package.""" + +from .asktellcmaes import AskTellCMAES +from .modularcmaes import ModularCMAES, evaluate_bbob, fmin +from .parameters import Parameters, BIPOPParameters +from .population import Population +from .sampling import ( + gaussian_sampling, + sobol_sampling, + halton_sampling, + mirrored_sampling, + orthogonal_sampling, + Halton, + Sobol, +) +from .utils import timeit, ert + +__all__ = ( + "AskTellCMAES", + "ModularCMAES", + "evaluate_bbob", + "fmin", + "Parameters", + "BIPOPParameters", + "Population", + "gaussian_sampling", + "sobol_sampling", + "halton_sampling", + "mirrored_sampling", + "orthogonal_sampling", + "Halton", + "Sobol", + "timeit", + "ert", +) diff --git a/modcma/__main__.py b/modcma/__main__.py index 22c3333..90d9a57 100644 --- a/modcma/__main__.py +++ b/modcma/__main__.py @@ -1,44 +1,36 @@ +"""Allows the user to call the library as a cli-module.""" + from argparse import ArgumentParser -from .modularcmaes import ModularCMAES, evaluate_bbob +from .modularcmaes import evaluate_bbob -parser = ArgumentParser( - description='Run single function CMAES') +parser = ArgumentParser(description="Run single function CMAES") parser.add_argument( - '-f', "--fid", type=int, - help="bbob function id", required=False, default=5 + "-f", "--fid", type=int, help="bbob function id", required=False, default=5 ) parser.add_argument( - '-d', "--dim", type=int, - help="dimension", required=False, default=5 + "-d", "--dim", type=int, help="dimension", required=False, default=5 ) parser.add_argument( - '-i', "--iterations", type=int, + "-i", + "--iterations", + type=int, help="number of iterations per agent", - required=False, default=50 -) -parser.add_argument( - '-l', '--logging', required=False, - action='store_true', default=False -) -parser.add_argument( - '-L', '--label', type=str, required=False, - default="" -) -parser.add_argument( - "-s", "--seed", type=int, required=False, - default=42 -) -parser.add_argument( - "-p", "--data_folder", type=str, required=False + required=False, + default=50, ) parser.add_argument( - "-a", "--arguments", nargs='+', required=False + "-l", 
"--logging", required=False, action="store_true", default=False ) +parser.add_argument("-L", "--label", type=str, required=False, default="") +parser.add_argument("-s", "--seed", type=int, required=False, default=42) +parser.add_argument("-p", "--data_folder", type=str, required=False) +parser.add_argument("-a", "--arguments", nargs="+", required=False) args = vars(parser.parse_args()) -for arg in (args.pop("arguments") or []): +for arg in args.pop("arguments") or []: + # pylint: disable=exec-used exec(arg, None, args) evaluate_bbob(**args) diff --git a/modcma/asktellcmaes.py b/modcma/asktellcmaes.py index ee5ecf8..9d482b2 100755 --- a/modcma/asktellcmaes.py +++ b/modcma/asktellcmaes.py @@ -1,3 +1,4 @@ +"""Ask and tell interface to the Modular CMA-ES.""" import warnings import typing from collections import deque @@ -6,147 +7,158 @@ from .modularcmaes import ModularCMAES -def check_break_conditions(f:typing.Callable) -> typing.Callable: - '''Decorator function, checks for break conditions for the ~AskTellCMAES. - Raises a StopIteration if break_conditions are met for ~AskTellCMAES. +def check_break_conditions(f: typing.Callable) -> typing.Callable: + """Decorator function, checks for break conditions for the ~AskTellCMAES. - Parameters - ---------- - f: callable - A method on ~AskTellCMAES - - Raises - ------ - StopIteration - When any(~AskTellCMAES.break_conditions) == True - ''' + Raises a StopIteration if break_conditions are met for ~AskTellCMAES. + + Parameters + ---------- + f: callable + A method on ~AskTellCMAES + + Raises + ------ + StopIteration + When any(~AskTellCMAES.break_conditions) == True + + """ @wraps(f) def inner(self, *args, **kwargs) -> typing.Any: if any(self.break_conditions): - raise StopIteration("Break conditions reached, ignoring call to: " + - f.__qualname__) + raise StopIteration( + "Break conditions reached, ignoring call to: " + f.__qualname__ + ) return f(self, *args, **kwargs) + return inner + class AskTellCMAES(ModularCMAES): - '''Ask tell interface for the ModularCMAES ''' + """Ask tell interface for the ModularCMAES.""" def __init__(self, *args, **kwargs) -> None: - 'Override the fitness_function argument with an empty callable' + """Override the fitness_function argument with an empty callable.""" super().__init__(lambda: None, *args, **kwargs) def fitness_func(self, x: np.ndarray) -> None: - 'Overwrite function call for fitness_func, calls register_individual' + """Overwrite function call for fitness_func, calls register_individual.""" self.register_individual(x) def sequential_break_conditions(self, i: int, f: float) -> None: - '''Overwrite ~modcma.modularcmaes.ModularCMAES.sequential_break_conditions - Raises not implemented if sequential selection is enabled, which - is not supported in the ask-tell interface. - - Parameters - ---------- - i: int - The index of the currently sampled individual in the population - f: float - The fitness value of the currently sampled individual - - Raises - ------ - NotImplementedError - When self.parameters.sequential == True - ''' + """Overwrite ~modcma.modularcmaes.ModularCMAES.sequential_break_conditions. + + Raises not implemented if sequential selection is enabled, which + is not supported in the ask-tell interface. 
+ + Parameters + ---------- + i: int + The index of the currently sampled individual in the population + f: float + The fitness value of the currently sampled individual + + Raises + ------ + NotImplementedError + When self.parameters.sequential == True + + """ if self.parameters.sequential: - raise NotImplementedError("Sequential selection is not implemented " - "for ask-tell interface") + raise NotImplementedError( + "Sequential selection is not implemented " "for ask-tell interface" + ) def step(self): - '''This method is disabled on this interface - + """Method is disabled on this interface. + Raises ------ NotImplementedError - ''' + + """ raise NotImplementedError("Step is undefined in this interface") def run(self): - '''This method is disabled on this interface - - Raises - ------ - NotImplementedError - ''' + """Method is disabled on this interface. + + Raises + ------ + NotImplementedError + + """ raise NotImplementedError("Run is undefined in this interface") def register_individual(self, x: np.ndarray) -> None: - '''Add new individuals to self.ask_queue + """Add new individuals to self.ask_queue. - Parameters - ---------- - x: np.ndarray - The vector to be added to the ask_queue - ''' - self.ask_queue.append(x) + Parameters + ---------- + x: np.ndarray + The vector to be added to the ask_queue + + """ + self.ask_queue.append(x.reshape(-1, 1)) @check_break_conditions def ask(self) -> np.ndarray: - '''Retrieves the next indivual from the ask_queue. - If the ask_queue is not defined yet, it is defined and mutate is - called in order to fill it. - - Returns - ------- - np.ndarray - ''' - if not hasattr(self, 'ask_queue'): + """Retrieve the next indivual from the ask_queue. + + If the ask_queue is not defined yet, it is defined and mutate is + called in order to fill it. + + Returns + ------- + np.ndarray + + """ + if not hasattr(self, "ask_queue"): self.ask_queue = deque() self.mutate() - return self.ask_queue.popleft() + return self.ask_queue.popleft() @check_break_conditions - def tell(self, xi:np.ndarray, fi: float) -> None: - '''Processes a provided fitness value fi for a given individual xi. - - Parameters - ---------- - xi: np.ndarray - An individual previously returned by ask() - fi: float - The fitness value for xi - Raises - ------ - RuntimeError - When ask() is not called before tell() - ValueError - When an unknown xi is provided to the method - - Warns - ----- - UserWarning - When the same xi is provided more than once - ''' + def tell(self, xi: np.ndarray, fi: float) -> None: + """Process a provided fitness value fi for a given individual xi. 
+ + Parameters + ---------- + xi: np.ndarray + An individual previously returned by ask() + fi: float + The fitness value for xi + Raises + ------ + RuntimeError + When ask() is not called before tell() + ValueError + When an unknown xi is provided to the method + + Warns + ----- + UserWarning + When the same xi is provided more than once + + """ + #pylint: disable=singleton-comparison if not self.parameters.population: raise RuntimeError("Call to tell without calling ask first is prohibited") - indices, *_ = np.where((self.parameters.population.x == xi).all(axis=0)) + indices, *_ = np.where((self.parameters.population.x == xi).all(axis=0)) if len(indices) == 0: raise ValueError("Unkown xi provided") - + for index in indices: - if self.parameters.population.f[index] == None: + if self.parameters.population.f[index] == None: # noqa self.parameters.population.f[index] = fi break else: warnings.warn("Repeated call to tell with same xi", UserWarning) self.parameters.population.f[index] = fi - + self.parameters.used_budget += 1 - if len(self.ask_queue) == 0 and (self.parameters.population.f != None).all(): + if len(self.ask_queue) == 0 and (self.parameters.population.f != None).all(): # noqa self.select() self.recombine() self.parameters.adapt() - self.mutate() - - - - + self.mutate() diff --git a/modcma/modularcmaes.py b/modcma/modularcmaes.py index dfdf94b..2821686 100644 --- a/modcma/modularcmaes.py +++ b/modcma/modularcmaes.py @@ -1,156 +1,203 @@ +"""Main implementation of Modular CMA-ES.""" import os -from datetime import datetime +from itertools import islice +from typing import List, Callable import numpy as np -from typing import List, Callable + from .parameters import Parameters from .population import Population from .utils import timeit, ert class ModularCMAES: - '''The main class of the configurable CMA ES continous optimizer. + r"""The main class of the configurable CMA ES continous optimizer. + + Attributes + ---------- + _fitness_func: callable + The objective function to be optimized + parameters: Parameters + All the parameters of the CMA ES algorithm are stored in + the parameters object. Note if a parameters object is not + explicitly passed, all \*args and \**kwargs passed into the + constructor of a ModularCMAES are directly passed into + the constructor of a Parameters object. + + See Also + -------- + modcma.parameters.Parameters + + """ - Attributes - ---------- - _fitness_func: callable - The objective function to be optimized - parameters: Parameters - All the parameters of the CMA ES algorithm are stored in - the parameters object. Note if a parameters object is not - explicitly passed, all \*args and \**kwargs passed into the - constructor of a ModularCMAES are directly passed into - the constructor of a Parameters object. - See Also - -------- - modcma.parameters.Parameters - - ''' parameters: "Parameters" _fitness_func: Callable - def __init__(self, fitness_func, *args, parameters=None, **kwargs) -> None: + def __init__( + self, fitness_func: Callable, *args, parameters=None, **kwargs + ) -> None: + """Set _fitness_func and forwards all other parameters to Parameters object.""" self._fitness_func = fitness_func - self.parameters = parameters if isinstance( - parameters, Parameters - ) else Parameters(*args, **kwargs) + self.parameters = ( + parameters + if isinstance(parameters, Parameters) + else Parameters(*args, **kwargs) + ) def mutate(self) -> None: - '''Returns a mutation generator, which performs mutation. + """Apply mutation operation. 
+ First, a directional vector zi is sampled from a sampler object as defined in the self.parameters object. Then, this zi vector is - multiplied with the eigenvalues D, and the dot product is taken with the + multiplied with the eigenvalues D, and the dot product is taken with the eigenvectors B of the covariance matrix C in order to create a scaled directional mutation vector yi. By scaling this vector with current population mean m, and the step size sigma, a new individual xi is created. The self.fitness_func is called in order to compute the fitness of the newly created - individuals. + individuals. - If the step size adaptation method is 'tpa', two less 'normal' + If the step size adaptation method is 'tpa', two less 'normal' individuals are created. - ''' - y, x, f = [], [], [] - n_offspring = self.parameters.lambda_ - if self.parameters.step_size_adaptation == 'tpa' and self.parameters.old_population: - n_offspring -= 2 - tpa_mutation(self.fitness_func, self.parameters, x, y, f) + #TODO: make bound correction vectorized and integrate with tpa + """ + perform_tpa = bool( + self.parameters.step_size_adaptation == "tpa" + and self.parameters.old_population + ) + + n_offspring = int(self.parameters.lambda_ - (2 * perform_tpa)) - for i in range(1, n_offspring + 1): - - zi = next(self.parameters.sampler) + if not self.parameters.sequential and not self.parameters.bound_correction: + z = np.hstack(tuple(islice(self.parameters.sampler, n_offspring))) if self.parameters.threshold_convergence: - zi = scale_with_threshold(zi, self.parameters.threshold) - - yi = np.dot(self.parameters.B, self.parameters.D * zi) - xi = self.parameters.m + (self.parameters.sigma * yi) - - xi, out_of_bounds = correct_bounds(xi, self.parameters.ub, - self.parameters.lb, self.parameters.bound_correction) - - self.parameters.n_out_of_bounds += out_of_bounds - - fi = self.fitness_func(xi) - [a.append(v) for a, v in ((y, yi), (x, xi), (f, fi),)] - - if self.sequential_break_conditions(i, fi): - break - - self.parameters.population = Population( - np.hstack(x), - np.hstack(y), - np.array(f)) + z = scale_with_threshold(z, self.parameters.threshold) + + y = np.dot(self.parameters.B, self.parameters.D * z) + x = self.parameters.m + (self.parameters.sigma * y) + f = np.array(tuple(map(self.fitness_func, x.T))) + + else: + x, y, f = [], [], [] + for i in range(1, n_offspring + 1): + zi = next(self.parameters.sampler) + if self.parameters.threshold_convergence: + zi = scale_with_threshold(zi, self.parameters.threshold) + + yi = np.dot(self.parameters.B, self.parameters.D * zi) + xi = self.parameters.m + (self.parameters.sigma * yi) + + xi, out_of_bounds = correct_bounds( + xi, + self.parameters.ub, + self.parameters.lb, + self.parameters.bound_correction, + ) + + self.parameters.n_out_of_bounds += out_of_bounds + + fi = self.fitness_func(xi) + for a, v in ((y, yi), (x, xi), (f, fi)): + a.append(v) + + if self.sequential_break_conditions(i, fi): + break + + x = np.hstack(x) + y = np.hstack(y) + f = np.array(f) + + if perform_tpa: + yt, xt, ft = tpa_mutation(self.fitness_func, self.parameters) + y = np.c_[yt, y] + x = np.c_[xt, x] + f = np.r_[ft, f] + + self.parameters.population = Population(x, y, f) def select(self) -> None: - '''Selection of best individuals in the population + """Selection of best individuals in the population. + The population is sorted according to their respective fitness values. Normally, the mu best individuals would be selected afterwards. 
- However, because the option of active update is available, (and we could - potentially need the mu worst individuals) the lambda best indivduals are - selected. In recombination, only the mu best individuals are used to recompute - the mean, so implicited selection happens there. + However, because the option of active update is available, (and we could + potentially need the mu worst individuals) the lambda best indivduals are + selected. In recombination, only the mu best individuals are used to recompute + the mean, so implicited selection happens there. If elistism is selected as an option, the mu best individuals of the old - population are added to the pool of indivduals before sorting. + population are added to the pool of indivduals before sorting. If selection is to be performed pairwise, the only the best individuals of sequential pairs are used, the others are discarded. The intended use for this functionality is with mirrored sampling, in order to counter the bias generated by this sampling method. This method cannot be performed when there - is an odd number of individuals in the population. - ''' - if self.parameters.mirrored == 'mirrored pairwise': + is an odd number of individuals in the population. + """ + if self.parameters.mirrored == "mirrored pairwise": if not len(self.parameters.population.f) % 2 == 0: raise ValueError( - 'Cannot perform pairwise selection with ' - 'an odd number of indivuduals' + "Cannot perform pairwise selection with " + "an odd number of indivuduals" ) - indices = [int(np.argmin(x) + (i * 2)) - for i, x in enumerate(np.split( - self.parameters.population.f, - len(self.parameters.population.f) // 2)) - ] + indices = [ + int(np.argmin(x) + (i * 2)) + for i, x in enumerate( + np.split( + self.parameters.population.f, + len(self.parameters.population.f) // 2, + ) + ) + ] self.parameters.population = self.parameters.population[indices] if self.parameters.elitist and self.parameters.old_population: self.parameters.population += self.parameters.old_population[ - : self.parameters.mu] + : self.parameters.mu + ] self.parameters.population.sort() self.parameters.population = self.parameters.population[ - : self.parameters.lambda_] + : self.parameters.lambda_ + ] if self.parameters.population.f[0] < self.parameters.fopt: self.parameters.fopt = self.parameters.population.f[0] self.parameters.xopt = self.parameters.population.x[:, 0] def recombine(self) -> None: - '''Recombination of new individuals - In the CMAES, recombination is not as explicit as in for example + """Recombination of new individuals. + + In the CMAES, recombination is not as explicit as in for example a genetic algorithm. In the CMAES, recombination happens though the - moving of the mean m, by multiplying the old mean with a weighted - combination of the current mu best individuals. + moving of the mean m, by multiplying the old mean with a weighted + combination of the current mu best individuals. TODO: check if this should be moved to parameters - ''' + """ self.parameters.m_old = self.parameters.m.copy() - self.parameters.m = self.parameters.m_old + (1 * ( - (self.parameters.population.x[:, :self.parameters.mu] - - self.parameters.m_old) @ - self.parameters.pweights).reshape(-1, 1) + self.parameters.m = self.parameters.m_old + ( + 1 + * ( + ( + self.parameters.population.x[:, : self.parameters.mu] + - self.parameters.m_old + ) + @ self.parameters.pweights + ).reshape(-1, 1) ) def step(self) -> bool: - '''The step method runs one iteration of the optimization process. 
- The method is called within the self.run loop. - In there, a while loop runs until this step - function returns a Falsy value. + """The step method runs one iteration of the optimization process. + + The method is called within the self.run loop. There, a while loop runs + until this step function returns a Falsy value. Returns ------- bool - Denoting whether to keep running this step function. - ''' + Denoting whether to keep running this step function. + """ self.mutate() self.select() self.recombine() @@ -158,8 +205,7 @@ def step(self) -> bool: return not any(self.break_conditions) def sequential_break_conditions(self, i: int, f: float) -> bool: - '''Method returning a boolean value, indicating whether there are any - sequential break conditions. + """Indicator whether there are any sequential break conditions. Parameters ---------- @@ -172,46 +218,48 @@ def sequential_break_conditions(self, i: int, f: float) -> bool: Returns ------- bool - ''' + + """ if self.parameters.sequential: - return (f < self.parameters.fopt and - i >= self.parameters.seq_cutoff and ( - self.parameters.mirrored != 'mirrored pairwise' - or i % 2 == 0 - ) - ) + return ( + f < self.parameters.fopt + and i >= self.parameters.seq_cutoff + and (self.parameters.mirrored != "mirrored pairwise" or i % 2 == 0) + ) return False def run(self): - '''Runs the step method until step method retuns a falsy value + """Run the step method until step method retuns a falsy value. Returns ------- ModularCMAES - ''' + + """ while self.step(): pass return self @property def break_conditions(self) -> List[bool]: - '''Returns a list with break conditions based on the - interal state (parameters) of the optimization algorithm. + """A list with break conditions based on the parameters of the CMA-ES. Returns ------- [bool, bool] - ''' + + """ if self.parameters.n_generations: return [self.parameters.t >= self.parameters.n_generations] return [ self.parameters.target >= self.parameters.fopt, - self.parameters.used_budget >= self.parameters.budget + self.parameters.used_budget >= self.parameters.budget, ] def fitness_func(self, x: np.ndarray) -> float: - '''Wrapper function for calling self._fitness_func - adds 1 to self.parameters.used_budget for each fitnes function + """Wrapper function for calling self._fitness_func. + + Adds 1 to self.parameters.used_budget for each fitnes function call. Parameters @@ -220,55 +268,57 @@ def fitness_func(self, x: np.ndarray) -> float: array on which to call the objective/fitness function Returns - ------- + ------- float - ''' + + """ self.parameters.used_budget += 1 return self._fitness_func(x.flatten()) def __repr__(self): + """Representation of ModularCMA-ES.""" return f"<{self.__class__.__qualname__}: {self._fitness_func}>" def __str__(self): + """String representation of ModularCMA-ES.""" return repr(self) -def tpa_mutation(fitness_func: Callable, parameters: "Parameters", x: list, y: list, f: list) -> None: - '''Helper function for applying the tpa mutation step. - The code was mostly taken from the ModEA framework, - and there a slight differences with the procedure as defined in: - Nikolaus Hansen. CMA-ES with two-point step-size adaptation.CoRR, abs/0805.0231,2008. 
- The function should not be used outside of the ModularCMAES optimizer - Parameters - ---------- - fitness_func: typing.Callable - A fitness function to be optimized - parameters: Parameters - A modcma Parameters object - x: list - A list of new individuals - y: list - A list of new mutation vectors - f: list - A list of fitnesses - ''' - - yi = ((parameters.m - parameters.m_old) / - parameters.sigma) - y.extend([yi, -yi]) - x.extend([ - parameters.m + (parameters.sigma * yi), - parameters.m + (parameters.sigma * -yi) - ]) - f.extend(list(map(fitness_func, x))) +def tpa_mutation(fitness_func: Callable, parameters: "Parameters") -> None: + """Helper function for applying the tpa mutation step. + + The code was mostly taken from the ModEA framework, + and there a slight differences with the procedure as defined in: + Nikolaus Hansen. CMA-ES with two-point step-size adaptation.CoRR, abs/0805.0231,2008. + The function should not be used outside of the ModularCMAES optimizer + + Parameters + ---------- + fitness_func: typing.Callable + A fitness function to be optimized + parameters: Parameters + A modcma Parameters object + x: list + A list of new individuals + y: list + A list of new mutation vectors + f: list + A list of fitnesses + + """ + yi = (parameters.m - parameters.m_old) / parameters.sigma + y = np.c_[yi, -yi] + x = parameters.m + (parameters.sigma * y[:, :2]) + f = np.array(list(map(fitness_func, x[:, :2].T))) if f[1] < f[0]: parameters.rank_tpa = -parameters.a_tpa else: - parameters.rank_tpa = ( - parameters.a_tpa + parameters.b_tpa) + parameters.rank_tpa = parameters.a_tpa + parameters.b_tpa + return y, x, f -def scale_with_threshold(z:np.ndarray, threshold:float) -> np.ndarray: - '''Function for scaling a vector z to have length > threshold + +def scale_with_threshold(z: np.ndarray, threshold: float) -> np.ndarray: + """Function for scaling a vector z to have length > threshold. Used for threshold convergence. @@ -283,54 +333,58 @@ def scale_with_threshold(z:np.ndarray, threshold:float) -> np.ndarray: ------- np.ndarray a scaled version of z - ''' + """ length = np.linalg.norm(z) if length < threshold: new_length = threshold + (threshold - length) - z *= (new_length / length) + z *= new_length / length return z -def correct_bounds(x:np.ndarray, ub:np.ndarray, - lb:np.ndarray, correction_method:str) -> np.ndarray: - '''Bound correction function - Rescales x to fall within the lower lb and upper - bounds ub specified. Available strategies are: - - None: Don't perform any boundary correction - - unif_resample: Resample each coordinate out of bounds uniformly within bounds - - mirror: Mirror each coordinate around the boundary - - COTN: Resample each coordinate out of bounds using the one-sided normal - distribution with variance 1/3 (bounds scaled to [0,1]) - - saturate: Set each out-of-bounds coordinate to the boundary - - toroidal: Reflect the out-of-bounds coordinates to the oposite bound inwards - - Parameters - ---------- - x: np.ndarray - vector of which the bounds should be corrected - ub: float - upper bound - lb: float - lower bound - correction_method: string - type of correction to perform - Returns - ------- - np.ndarray - bound corrected version of x - bool - whether the population was out of bounds +def correct_bounds( + x: np.ndarray, ub: np.ndarray, lb: np.ndarray, correction_method: str +) -> np.ndarray: + """Bound correction function. + + Rescales x to fall within the lower lb and upper + bounds ub specified. 
Available strategies are: + - None: Don't perform any boundary correction + - unif_resample: Resample each coordinate out of bounds uniformly within bounds + - mirror: Mirror each coordinate around the boundary + - COTN: Resample each coordinate out of bounds using the one-sided normal + distribution with variance 1/3 (bounds scaled to [0,1]) + - saturate: Set each out-of-bounds coordinate to the boundary + - toroidal: Reflect the out-of-bounds coordinates to the oposite bound inwards + + Parameters + ---------- + x: np.ndarray + vector of which the bounds should be corrected + ub: float + upper bound + lb: float + lower bound + correction_method: string + type of correction to perform + + Returns + ------- + np.ndarray + bound corrected version of x + bool + whether the population was out of bounds + + Raises + ------ + ValueError + When an unkown value for correction_method is provided - Raises - ------ - ValueError - When an unkown value for correction_method is provided - ''' + """ out_of_bounds = np.logical_or(x > ub, x < lb) if not any(out_of_bounds): return x, False - + if not correction_method: return x, True @@ -339,10 +393,12 @@ def correct_bounds(x:np.ndarray, ub:np.ndarray, if correction_method == "mirror": x[out_of_bounds] = lb + (ub - lb) * np.abs( - y - np.floor(y) - np.mod(np.floor(y), 2)) + y - np.floor(y) - np.mod(np.floor(y), 2) + ) elif correction_method == "COTN": x[out_of_bounds] = lb + (ub - lb) * np.abs( - (y > 0) - np.abs(np.random.normal(0, 1/3, size=y.shape))) + (y > 0) - np.abs(np.random.normal(0, 1 / 3, size=y.shape)) + ) elif correction_method == "unif_resample": x[out_of_bounds] = np.random.uniform(lb, ub) elif correction_method == "saturate": @@ -350,24 +406,24 @@ def correct_bounds(x:np.ndarray, ub:np.ndarray, elif correction_method == "toroidal": x[out_of_bounds] = lb + (ub - lb) * np.abs(y - np.floor(y)) else: - raise ValueError( - f"Unknown argument: {correction_method} for correction_method" - ) + raise ValueError(f"Unknown argument: {correction_method} for correction_method") return x, True + @timeit def evaluate_bbob( - fid, - dim, - iterations=50, - label='', - logging=False, - data_folder=None, - seed=42, - instance=1, - target_precision=1e-8, - **kwargs): - '''Helper function to evaluate a ModularCMAES on the BBOB test suite. + fid, + dim, + iterations=50, + label="", + logging=False, + data_folder=None, + seed=42, + instance=1, + target_precision=1e-8, + **kwargs, +): + """Helper function to evaluate a ModularCMAES on the BBOB test suite. Parameters ---------- @@ -381,13 +437,17 @@ def evaluate_bbob( The label to be given to the run, used for logging with BBOB logging: bool = False Specifies whether to use logging - seed: int = 42 + data_folder: str = None + File path where to store data when logging = True + seed: int = 42 The random seed to be used instance: int = 1 The bbob function instance + target_precision: float = 1e-8 + The target precision for the objective function value **kwargs These are directly passed into the instance of ModularCMAES, - in this manner parameters can be specified for the optimizer. + in this manner parameters can be specified for the optimizer. 
Returns ------- @@ -395,47 +455,56 @@ def evaluate_bbob( The number of evaluations for each run of the optimizer fopts The best fitness values for each run of the optimizer - ''' + + """ # This speeds up the import, this import is quite slow, so import it lazy here + # pylint: disable=import-outside-toplevel from IOHexperimenter import IOH_function, IOH_logger - + evals, fopts = np.array([]), np.array([]) if seed: np.random.seed(seed) - fitness_func = IOH_function(fid, dim, instance, target_precision=target_precision, suite="BBOB") - + fitness_func = IOH_function( + fid, dim, instance, target_precision=target_precision, suite="BBOB" + ) + if logging: data_location = data_folder if os.path.isdir(data_folder) else os.getcwd() logger = IOH_logger(data_location, f"{label}F{fid}_{dim}D") fitness_func.add_logger(logger) - - print(f"Optimizing function {fid} in {dim}D for target {target_precision} with {iterations} iterations.") - + + print( + f"Optimizing function {fid} in {dim}D for target " + f"{target_precision} with {iterations} iterations." + ) + for idx in range(iterations): if idx > 0: fitness_func.reset() target = fitness_func.get_target() - - optimizer = ModularCMAES( - fitness_func, dim, target = target, **kwargs).run() + + optimizer = ModularCMAES(fitness_func, dim, target=target, **kwargs).run() evals = np.append(evals, fitness_func.evaluations) fopts = np.append(fopts, fitness_func.best_so_far_precision) - + result_string = ( - "FCE:\t{:10.8f}\t{:10.4f}\n" - "ERT:\t{:10.4f}\t{:10.4f}\n" - "{}/{} runs reached target" + "FCE:\t{:10.8f}\t{:10.4f}\n" + "ERT:\t{:10.4f}\t{:10.4f}\n" + "{}/{} runs reached target" + ) + print( + result_string.format( + np.mean(fopts), + np.std(fopts), + *ert(evals, optimizer.parameters.budget), + iterations, + ) ) - print(result_string.format( - np.mean(fopts), np.std(fopts), - *ert(evals, optimizer.parameters.budget), - iterations - )) return evals, fopts def fmin(func, dim, maxfun=None, **kwargs): - '''Minimize a function using the modular CMA-ES + """Minimize a function using the modular CMA-ES. Parameters ---------- @@ -447,7 +516,7 @@ def fmin(func, dim, maxfun=None, **kwargs): Maximum number of function evaluations to make. **kwargs These are directly passed into the instance of ModularCMAES, - in this manner parameters can be specified for the optimizer. + in this manner parameters can be specified for the optimizer. Returns ------- @@ -455,8 +524,9 @@ def fmin(func, dim, maxfun=None, **kwargs): The variables which minimize the function during this run fopt The value of function at found xopt - evals + evals The number of evaluations performed - ''' - cma = ModularCMAES(func, dim, budget = maxfun, **kwargs).run() + + """ + cma = ModularCMAES(func, dim, budget=maxfun, **kwargs).run() return cma.parameters.xopt, cma.parameters.fopt, cma.parameters.used_budget diff --git a/modcma/parameters.py b/modcma/parameters.py index 8b6f0fe..b137c9f 100644 --- a/modcma/parameters.py +++ b/modcma/parameters.py @@ -1,11 +1,13 @@ +"""Definition of Parameters objects, which are used by ModularCMA-ES.""" import os import pickle import warnings from collections import deque from typing import Generator, TypeVar + import numpy as np +from scipy import linalg -from . 
import utils, population from .utils import AnnotatedStruct from .sampling import ( gaussian_sampling, @@ -14,224 +16,227 @@ sobol_sampling, halton_sampling, ) -class Parameters(AnnotatedStruct): - '''AnnotatedStruct object for holding the parameters for the Configurable CMAES - Attributes - ---------- - d: int - The dimensionality of the problem - target: float = -float("inf") - The absolute target of the optimization problem - budget: int = None - The maximum number of iterations - n_generations: int = None - The number of generations to run the optimizer. If this value is specified - this will override the default break-conditions, and the optimizer will only - stop after n_generations. Target-reached and budget will be ignored. - lambda_: int = None - The number of offspring in the population - mu: int = None - The number of parents in the population - init_sigma: float = .5 - The initial value of sigma (step size) - a_tpa: float = .5 - Parameter used in TPA - b_tpa: float = 0. - Parameter used in TPA - cs: float = None - Learning rate for the cumulation of the step size control - cc: float = None - Learning rate for the rank-one update - cmu: float = None - Learning rate for the rank-mu update - c1: float = None - Learning rate for the rank-one update - seq_cutoff_factor: int = 1 - Used in sequential selection, the number of times mu individuals must be seen - before a sequential break can be performed - ub: np.array = None - The upper bound, used for bound correction and threshold convergence - lb: np.array = None - The lower bound, used for bound correction and threshold convergence - init_threshold: float = 0.2 - The initial length theshold used in treshold convergence - decay_factor: float = 0.995 - The decay for the threshold used in threshold covergence - max_resamples: int - The maximum amount of resamples which can be done when 'dismiss'-boundary correction is used - active: bool = False - Specifying whether to use active update. - [1] G. Jastrebski, D. V. Arnold, et al. Improving evolution strategies through - active covariance matrix adaptation. In Evolutionary Computation (CEC), - 2006 IEEE Congress on, pages 2814–2821. IEEE, 2006 - elitist: bool = False - Specifying whether to use an elitist approach - sequential: bool = False - Specifying whether to use sequential selection - [3] D. Brockhoff, A. Auger, N. Hansen, D. V. Arnold, and T. Hohm. - Mirrored Sampling and Sequential Selection for Evolution Strategies. - In R. Schaefer, C. Cotta, J. Kołodziej, and G. Rudolph, editors, Parallel - Problem Solving from Nature, PPSN XI: 11th International Conference, - Kraków, Poland, September 11-15, 2010, Proceedings, Part I, pages - 11–21, Berlin, Heidelberg, 2010. Springer Berlin Heidelberg. - threshold_convergence: bool = False - Specifying whether to use threshold convergence - [4] A. Piad-Morffis, S. Estevez-Velarde, A. Bolufe-Rohler, J. Montgomery, - and S. Chen. Evolution strategies with thresheld convergence. In - Evolutionary Computation (CEC), 2015 IEEE Congress on, pages 2097– - 2104, May 2015. - bound_correction: str = (None, 'saturate', 'unif_resample', 'COTN', 'toroidal', 'mirror',) - Specifying whether to use bound correction to enforce ub and lb - orthogonal: bool = False - Specifying whether to use orthogonal sampling - [5] H. Wang, M. Emmerich, and T. Bäck. Mirrored Orthogonal Sampling - with Pairwise Selection in Evolution Strategies. In Proceedings of the - 29th Annual ACM Symposium on Applied Computing, pages 154–156. 
- local_restart: str = (None, 'IPOP', ) - Specifying which local restart strategy should be used - IPOP: - [11] Anne Auger and Nikolaus Hansen. A restart cma evolution strategy - with increasing population size. volume 2, pages 1769–1776, 01 2005 - base_sampler: str = ('gaussian', 'sobol', 'halton',) - Denoting which base sampler to use, 'sobol', 'halton' can - be selected to sample from a quasi random sequence. - [6] A. Auger, M. Jebalia, and O. Teytaud. Algorithms (x, sigma, eta): - random mutations for evolution strategies. In Artificial Evolution: - 7th International Conference, Revised Selected Papers, pages 296–307. - Springer, 2006. - mirrored: str = (None, 'mirrored', mirrored pairwise', ) - Specifying whether to use mirrored sampling - [2] D. Brockhoff, A. Auger, N. Hansen, D. V. CMAEST. Hohm. - Mirrored Sampling and Sequential SelectioCMAESion Strategies. - In R. Schaefer, C. Cotta, J. Kołodziej, aCMAESh, editors, Parallel - Problem Solving from Nature, PPSN XI: 11tCMAESnal Conference, - Kraków, Poland, September 11-15, 2010, PrCMAESart I, pages - 11–21, Berlin, Heidelberg, 2010. SpringerCMAESelberg. - ACM, 2014. - weights_option: str = ('default', '1/mu', '1/2^mu', ) - Denoting the recombination weigths to be used. - [7] Sander van Rijn, Hao Wang, Matthijs van Leeuwen, and Thomas Bäck. 2016. - Evolving the Structure of Evolution Strategies. Computer 49, 5 (May 2016), 54–63. - step_size_adaptation: str = ('csa', 'tpa', 'msr', ) - Specifying which step size adaptation mechanism should be used. - csa: - [8] Nikolaus Hansen. The CMA evolution strategy: A tutorial.CoRR, abs/1604.00772, 2016 - tpa: - [9] Nikolaus Hansen. CMA-ES with two-point step-size adaptation.CoRR, abs/0805.0231,2008. - msr: - [10] Ouassim Ait Elhara, Anne Auger, and Nikolaus Hansen. - A Median Success Rule for Non-Elitist Evolution Strategies: Study of Feasibility. - In Blum et al. Christian, editor,Genetic and Evolutionary Computation Conference, - pages 415–422, Amsterdam, Nether-lands, July 2013. ACM, ACM Press. - population: TypeVar('Population') = None - The current population of individuals - old_population: TypeVar('Population') = None - The old population of individuals - termination_criteria: dict = {} - A dictionary of termination criteria - ipop_factor: int = 2 - The factor to increase the population after each resart (IPOP) - tolx: float = 10e-12 - Use to compute restart condition - tolup_sigma: float = 10e20 - Use to compute restart condition - condition_cov: float = 10e14 - Use to compute restart condition - ps_factor: float = 1. 
- Determines the frequence of exploration/expliotation - 1 is neutral, lower is more expliotative, higher is more explorative - sampler: generator - A generator object producing new samples - used_budget: int - The number of function evaluations used - fopt: float - The fitness of the current best individual - t: int - The number of generations - sigma_over_time: list - The value sigma has in each generation - best_fopts: list - The value of fopt in each generation - median_fitnesses: list - The median fitness value in each generation - best_fitnesses: list - The best fitness value observed in each generation - flat_fitnesses = deque - A deque containing boolean values denoting if a flat fitness value is observed - in recent generations - restarts: list - A list containing the t values (generations) where a restart has - taken place - seq_cutoff: int - The number of individuals that must be seen before a sequential break can be performed - diameter: float - The diameter of the search space - max_iter: float - The maximum number of iterations that can occur between two restarts. - nbin: int - Used to determine a window for equal function values - n_stagnation: int - Used to determine a window for stagnation - flat_fitness_index: int - Used to determine which ranked individual should be - the same as the first indivual in order to determine - flat fitness values. - sigma: float - The step size - m: np.ndarray - The mean value of the individuals - dm: np.ndarray - The difference in the new mean value of the individuals versus the old mean value. - pc: np.ndarray - The evolution path - ps: np.ndarray - The conjugate evolution path - C: np.ndarray - The covariance matrix - B: np.ndarray - The eigenvectors of the covariance matrix C - D: np.ndarray - The eigenvalues of the covariance matrix C - invC: np.ndarray - The result of C**-(1/2) - s: float - Used for TPA - rank_tpa: float - Used for TPA - weights: np.ndarray - The recombination weights. - pweights: np.ndarray - The positive recombination weights. - nweights: np.ndarray - The negative recombination weights, used in active update - mueff: float - The variance effective selection mass - damps: float - Used for adapting sigma with csa - chiN: np.ndarray - Value approaching E||N(0,I)|| - ds: float - Used for msr - threshold: float - The length threshold used in threshold convergence - last_restart: int - The generation in where the last restart has occored - max_resamples: int - The maximum amount of resamples which can be done when 'dismiss'-boundary correction is used - n_out_of_bounds: int - The number of individals that are sampled out of bounds - ''' +class Parameters(AnnotatedStruct): + """AnnotatedStruct object for holding the parameters for the ModularCMAES. + + Attributes + ---------- d: int + The dimensionality of the problem target: float = -float("inf") + The absolute target of the optimization problem budget: int = None + The maximum number of iterations n_generations: int = None + The number of generations to run the optimizer. If this value is specified + this will override the default break-conditions, and the optimizer will only + stop after n_generations. Target-reached and budget will be ignored. lambda_: int = None + The number of offspring in the population mu: int = None + The number of parents in the population init_sigma: float = .5 + The initial value of sigma (step size) a_tpa: float = .5 + Parameter used in TPA b_tpa: float = 0. 
+ Parameter used in TPA + cs: float = None + Learning rate for the cumulation of the step size control + cc: float = None + Learning rate for the rank-one update + cmu: float = None + Learning rate for the rank-mu update + c1: float = None + Learning rate for the rank-one update + seq_cutoff_factor: int = 1 + Used in sequential selection, the number of times mu individuals must be seen + before a sequential break can be performed + ub: np.array = None + The upper bound, used for bound correction and threshold convergence + lb: np.array = None + The lower bound, used for bound correction and threshold convergence + init_threshold: float = 0.2 + The initial length theshold used in treshold convergence + decay_factor: float = 0.995 + The decay for the threshold used in threshold covergence + max_resamples: int + The maximum amount of resamples which can be done when 'dismiss'-boundary correction is used + active: bool = False + Specifying whether to use active update. + [1] G. Jastrebski, D. V. Arnold, et al. Improving evolution strategies through + active covariance matrix adaptation. In Evolutionary Computation (CEC), + 2006 IEEE Congress on, pages 2814–2821. IEEE, 2006 + elitist: bool = False + Specifying whether to use an elitist approach + sequential: bool = False + Specifying whether to use sequential selection + [3] D. Brockhoff, A. Auger, N. Hansen, D. V. Arnold, and T. Hohm. + Mirrored Sampling and Sequential Selection for Evolution Strategies. + In R. Schaefer, C. Cotta, J. Kołodziej, and G. Rudolph, editors, Parallel + Problem Solving from Nature, PPSN XI: 11th International Conference, + Kraków, Poland, September 11-15, 2010, Proceedings, Part I, pages + 11–21, Berlin, Heidelberg, 2010. Springer Berlin Heidelberg. + threshold_convergence: bool = False + Specifying whether to use threshold convergence + [4] A. Piad-Morffis, S. Estevez-Velarde, A. Bolufe-Rohler, J. Montgomery, + and S. Chen. Evolution strategies with thresheld convergence. In + Evolutionary Computation (CEC), 2015 IEEE Congress on, pages 2097– + 2104, May 2015. + bound_correction: str = (None, 'saturate', 'unif_resample', 'COTN', 'toroidal', 'mirror',) + Specifying whether to use bound correction to enforce ub and lb + orthogonal: bool = False + Specifying whether to use orthogonal sampling + [5] H. Wang, M. Emmerich, and T. Bäck. Mirrored Orthogonal Sampling + with Pairwise Selection in Evolution Strategies. In Proceedings of the + 29th Annual ACM Symposium on Applied Computing, pages 154–156. + local_restart: str = (None, 'IPOP', ) + Specifying which local restart strategy should be used + IPOP: + [11] Anne Auger and Nikolaus Hansen. A restart cma evolution strategy + with increasing population size. volume 2, pages 1769–1776, 01 2005 + base_sampler: str = ('gaussian', 'sobol', 'halton',) + Denoting which base sampler to use, 'sobol', 'halton' can + be selected to sample from a quasi random sequence. + [6] A. Auger, M. Jebalia, and O. Teytaud. Algorithms (x, sigma, eta): + random mutations for evolution strategies. In Artificial Evolution: + 7th International Conference, Revised Selected Papers, pages 296–307. + Springer, 2006. + mirrored: str = (None, 'mirrored', mirrored pairwise', ) + Specifying whether to use mirrored sampling + [2] D. Brockhoff, A. Auger, N. Hansen, D. V. CMAEST. Hohm. + Mirrored Sampling and Sequential SelectioCMAESion Strategies. + In R. Schaefer, C. Cotta, J. 
Kołodziej, aCMAESh, editors, Parallel + Problem Solving from Nature, PPSN XI: 11tCMAESnal Conference, + Kraków, Poland, September 11-15, 2010, PrCMAESart I, pages + 11–21, Berlin, Heidelberg, 2010. SpringerCMAESelberg. + ACM, 2014. + weights_option: str = ('default', '1/mu', '1/2^mu', ) + Denoting the recombination weigths to be used. + [7] Sander van Rijn, Hao Wang, Matthijs van Leeuwen, and Thomas Bäck. 2016. + Evolving the Structure of Evolution Strategies. Computer 49, 5 (May 2016), 54–63. + step_size_adaptation: str = ('csa', 'tpa', 'msr', ) + Specifying which step size adaptation mechanism should be used. + csa: + [8] Nikolaus Hansen. The CMA evolution strategy: A tutorial.CoRR, abs/1604.00772, 2016 + tpa: + [9] Nikolaus Hansen. CMA-ES with two-point step-size adaptation.CoRR, abs/0805.0231,2008. + msr: + [10] Ouassim Ait Elhara, Anne Auger, and Nikolaus Hansen. + A Median Success Rule for Non-Elitist Evolution Strategies: Study of Feasibility. + In Blum et al. Christian, editor,Genetic and Evolutionary Computation Conference, + pages 415–422, Amsterdam, Nether-lands, July 2013. ACM, ACM Press. + population: TypeVar('Population') = None + The current population of individuals + old_population: TypeVar('Population') = None + The old population of individuals + termination_criteria: dict = {} + A dictionary of termination criteria + ipop_factor: int = 2 + The factor to increase the population after each resart (IPOP) + tolx: float = 10e-12 + Use to compute restart condition + tolup_sigma: float = 10e20 + Use to compute restart condition + condition_cov: float = 10e14 + Use to compute restart condition + ps_factor: float = 1. + Determines the frequence of exploration/expliotation + 1 is neutral, lower is more expliotative, higher is more explorative + sampler: generator + A generator object producing new samples + used_budget: int + The number of function evaluations used + fopt: float + The fitness of the current best individual + t: int + The number of generations + sigma_over_time: list + The value sigma has in each generation + best_fopts: list + The value of fopt in each generation + median_fitnesses: list + The median fitness value in each generation + best_fitnesses: list + The best fitness value observed in each generation + flat_fitnesses = deque + A deque containing boolean values denoting if a flat fitness value is observed + in recent generations + restarts: list + A list containing the t values (generations) where a restart has + taken place + seq_cutoff: int + The number of individuals that must be seen before a sequential break can be performed + diameter: float + The diameter of the search space + max_iter: float + The maximum number of iterations that can occur between two restarts. + nbin: int + Used to determine a window for equal function values + n_stagnation: int + Used to determine a window for stagnation + flat_fitness_index: int + Used to determine which ranked individual should be + the same as the first indivual in order to determine + flat fitness values. + sigma: float + The step size + m: np.ndarray + The mean value of the individuals + dm: np.ndarray + The difference in the new mean value of the individuals versus the old mean value. 
+ pc: np.ndarray + The evolution path + ps: np.ndarray + The conjugate evolution path + C: np.ndarray + The covariance matrix + B: np.ndarray + The eigenvectors of the covariance matrix C + D: np.ndarray + The eigenvalues of the covariance matrix C + invC: np.ndarray + The result of C**-(1/2) + s: float + Used for TPA + rank_tpa: float + Used for TPA + weights: np.ndarray + The recombination weights. + pweights: np.ndarray + The positive recombination weights. + nweights: np.ndarray + The negative recombination weights, used in active update + mueff: float + The variance effective selection mass + damps: float + Used for adapting sigma with csa + chiN: np.ndarray + Value approaching E||N(0,I)|| + ds: float + Used for msr + threshold: float + The length threshold used in threshold convergence + last_restart: int + The generation in where the last restart has occored + max_resamples: int + The maximum amount of resamples which can be done when 'dismiss'-boundary correction is used + n_out_of_bounds: int + The number of individals that are sampled out of bounds + + """ + + d: int + target: float = -float("inf") + budget: int = None + n_generations: int = None + lambda_: int = None + mu: int = None + init_sigma: float = 0.5 + a_tpa: float = 0.5 + b_tpa: float = 0.0 cs: float = None cc: float = None cmu: float = None @@ -242,31 +247,31 @@ class Parameters(AnnotatedStruct): init_threshold: float = 0.1 decay_factor: float = 0.995 max_resamples: int = 1000 - + active: bool = False elitist: bool = False sequential: bool = False threshold_convergence: bool = False - bound_correction: (None, 'saturate', 'unif_resample', - 'COTN', 'toroidal', 'mirror',) = None + bound_correction: ( + None, "saturate", "unif_resample", "COTN", "toroidal", "mirror") = None orthogonal: bool = False - local_restart: (None, 'IPOP', 'BIPOP',) = None - base_sampler: ('gaussian', 'sobol', 'halton',) = 'gaussian' - mirrored: (None, 'mirrored', 'mirrored pairwise',) = None - weights_option: ('default', 'equal', '1/2^lambda', ) = 'default' - step_size_adaptation: ('csa', 'tpa', 'msr', ) = 'csa' - - population: TypeVar('Population') = None - old_population: TypeVar('Population') = None + local_restart: (None, "IPOP", "BIPOP") = None + base_sampler: ("gaussian", "sobol", "halton") = "gaussian" + mirrored: (None, "mirrored", "mirrored pairwise" ) = None + weights_option: ("default","equal", "1/2^lambda") = "default" + step_size_adaptation: ("csa", "tpa", "msr") = "csa" + + population: TypeVar("Population") = None + old_population: TypeVar("Population") = None termination_criteria: dict = {} ipop_factor: int = 2 tolx: float = pow(10, -12) tolup_sigma: float = float(pow(10, 20)) condition_cov: float = float(pow(10, 14)) - ps_factor: float = 1. + ps_factor: float = 1.0 compute_termination_criteria: bool = False __modules__ = ( - "active", + "active", "elitist", "orthogonal", "sequential", @@ -276,10 +281,11 @@ class Parameters(AnnotatedStruct): "base_sampler", "weights_option", "local_restart", - "bound_correction" - ) + "bound_correction", + ) def __init__(self, *args, **kwargs) -> None: + """Intialize parameters. Calls sub constructors for different parameter types.""" super().__init__(*args, **kwargs) self.init_selection_parameters() self.init_fixed_parameters() @@ -288,14 +294,14 @@ def __init__(self, *args, **kwargs) -> None: self.init_local_restart_parameters() def get_sampler(self) -> Generator: - '''Function to return a sampler generator based on the values - of other parameters. 
+ """Function to return a sampler generator based on the values of other parameters. Returns ------- generator a sampler - ''' + + """ sampler = { "gaussian": gaussian_sampling, "sobol": sobol_sampling, @@ -303,21 +309,20 @@ def get_sampler(self) -> Generator: }.get(self.base_sampler, gaussian_sampling)(self.d) if self.orthogonal: - n_samples = max(1, ( - self.lambda_ // (2 - (not self.mirrored))) - ( - 2 * self.step_size_adaptation == 'tpa') + n_samples = max( + 1, + (self.lambda_ // (2 - (not self.mirrored))) + - (2 * self.step_size_adaptation == "tpa"), ) sampler = orthogonal_sampling(sampler, n_samples) - + if self.mirrored: sampler = mirrored_sampling(sampler) - + return sampler - + def init_fixed_parameters(self) -> None: - '''Initialization function for parameters that - are not to be restarted during a optimization run. - ''' + """Initialization function for parameters that are not restarted during a optimization run.""" self.used_budget = 0 self.n_out_of_bounds = 0 self.budget = self.budget or int(1e4) * self.d @@ -332,119 +337,114 @@ def init_fixed_parameters(self) -> None: self.flat_fitnesses = deque(maxlen=self.d) self.restarts = [] self.bipop_parameters = BIPOPParameters( - self.lambda_, - self.budget, - self.mu / self.lambda_ + self.lambda_, self.budget, self.mu / self.lambda_ ) - + def init_selection_parameters(self) -> None: - '''Initialization function for parameters that are of influence - in selection/population control. - ''' - self.lambda_ = self.lambda_ or ( - 4 + np.floor(3 * np.log(self.d))).astype(int) - - if self.mirrored == 'mirrored pairwise': + """Initialization function for parameters that influence in selection.""" + self.lambda_ = self.lambda_ or (4 + np.floor(3 * np.log(self.d))).astype(int) + + if self.mirrored == "mirrored pairwise": self.seq_cutoff_factor = max(2, self.seq_cutoff_factor) if self.lambda_ % 2 != 0: - self.lambda_ += 1 - + self.lambda_ += 1 + self.mu = self.mu or self.lambda_ // 2 if self.mu > self.lambda_: warnings.warn( "\u03BC ({}) cannot be larger than \u03bb ({}). Modifying \u03bb to ({})".format( self.mu, self.lambda_, self.lambda_ // 2 - ), RuntimeWarning) + ), + RuntimeWarning, + ) self.mu = self.lambda_ // 2 - + self.seq_cutoff = self.mu * self.seq_cutoff_factor - self.sampler = self.get_sampler() - self.set_default('ub', np.ones((self.d, 1)) * 5) - self.set_default('lb', np.ones((self.d, 1)) * -5) + self.sampler = self.get_sampler() + self.set_default("ub", np.ones((self.d, 1)) * 5) + self.set_default("lb", np.ones((self.d, 1)) * -5) self.diameter = np.linalg.norm(self.ub - (self.lb)) - + def init_local_restart_parameters(self) -> None: - '''Initialization function for parameters that are used by - local restart strategies, i.e. IPOP. - ''' + """Initialization function for parameters for local restart strategies, i.e. IPOP. + + TODO: check if we can move this to separate object. + """ if len(self.restarts) == 0: self.restarts.append(self.t) - self.max_iter = 100 + 50 * (self.d + 3)**2 / np.sqrt(self.lambda_) + self.max_iter = 100 + 50 * (self.d + 3) ** 2 / np.sqrt(self.lambda_) self.nbin = 10 + int(np.ceil(30 * self.d / self.lambda_)) self.n_stagnation = min(int(120 + (30 * self.d / self.lambda_)), 20000) - self.flat_fitness_index = int(np.round(.1 + self.lambda_ / 4)) #TODO: check why this was ceil + self.flat_fitness_index = int( + np.round(0.1 + self.lambda_ / 4) + ) def init_adaptation_parameters(self) -> None: - '''Initialization function for parameters that are of influence - in the self-adaptive processes of the parameters. 
Examples are - recombination weights and learning rates for the covariance + """Initialization function for parameters for self-adaptive processes. + + Examples are recombination weights and learning rates for the covariance matrix adapation. - ''' - - if self.weights_option == 'equal': + """ + if self.weights_option == "equal": ws = np.ones(self.lambda_) / self.lambda_ self.weights = np.append(ws, ws[::-1] * -1) if self.lambda_ % 2 != 0: self.weights = np.append([1 / self.lambda_], self.weights) - elif self.weights_option == '1/2^lambda': - ws = 1 / 2**np.arange(1, self.lambda_ + 1) + ( - (1 / (2**self.lambda_)) / self.lambda_) + elif self.weights_option == "1/2^lambda": + ws = 1 / 2 ** np.arange(1, self.lambda_ + 1) + ( + (1 / (2 ** self.lambda_)) / self.lambda_ + ) self.weights = np.append(ws, ws[::-1] * -1) - if self.lambda_ % 2 != 0: - self.weights = np.append([1/self.lambda_**2], self.weights) + if self.lambda_ % 2 != 0: + self.weights = np.append([1 / self.lambda_ ** 2], self.weights) else: - self.weights = (np.log((self.lambda_ + 1) / 2) - - np.log(np.arange(1, self.lambda_ + 1))) - self.pweights = self.weights[:self.mu] - self.nweights = self.weights[self.mu:] - - self.mueff = ( - self.pweights.sum()**2 / - (self.pweights ** 2).sum() - ) - mueff_neg = ( - self.nweights.sum()**2 / - (self.nweights ** 2).sum() + self.weights = np.log((self.lambda_ + 1) / 2) - np.log( + np.arange(1, self.lambda_ + 1) + ) + self.pweights = self.weights[: self.mu] + self.nweights = self.weights[self.mu :] + + self.mueff = self.pweights.sum() ** 2 / (self.pweights ** 2).sum() + mueff_neg = self.nweights.sum() ** 2 / (self.nweights ** 2).sum() + self.c1 = self.c1 or 2 / ((self.d + 1.3) ** 2 + self.mueff) + self.cmu = self.cmu or min( + 1 - self.c1, (2 * ( + (self.mueff - 2 + (1 / self.mueff)) + / ((self.d + 2) ** 2 + (2 * self.mueff / 2)) + ) + ) ) - self.c1 = self.c1 or 2 / ((self.d + 1.3)**2 + self.mueff) - self.cmu = self.cmu or min(1 - self.c1, ( - 2 * ((self.mueff - 2 + (1 / self.mueff)) / - ((self.d + 2)**2 + (2 * self.mueff / 2))) - )) self.pweights = self.pweights / self.pweights.sum() amu_neg = 1 + (self.c1 / self.mu) amueff_neg = 1 + ((2 * mueff_neg) / (self.mueff + 2)) aposdef_neg = (1 - self.c1 - self.cmu) / (self.d * self.cmu) - self.nweights = (min(amu_neg, amueff_neg, aposdef_neg) / - np.abs(self.nweights).sum()) * self.nweights + self.nweights = ( + min(amu_neg, amueff_neg, aposdef_neg) / np.abs(self.nweights).sum() + ) * self.nweights self.weights = np.append(self.pweights, self.nweights) self.cc = self.cc or ( - (4 + (self.mueff / self.d)) / - (self.d + 4 + (2 * self.mueff / self.d)) + (4 + (self.mueff / self.d)) / (self.d + 4 + (2 * self.mueff / self.d)) ) - + self.cs = self.cs or ( - .3 if self.step_size_adaptation in ("msr", "tpa",) else - (self.mueff + 2) / (self.d + self.mueff + 5) + 0.3 if self.step_size_adaptation in ("msr", "tpa") + else (self.mueff + 2) / (self.d + self.mueff + 5) ) - - self.damps = ( - 1. + (2. * max(0., np.sqrt((self.mueff - 1) / - (self.d + 1)) - 1) + self.cs) - ) - self.chiN = ( - self.d ** .5 * (1 - 1 / (4 * self.d) + 1 / (21 * self.d ** 2)) + + self.damps = 1.0 + ( + 2.0 * max(0.0, np.sqrt((self.mueff - 1) / (self.d + 1)) - 1) + self.cs ) + self.chiN = self.d ** 0.5 * (1 - 1 / (4 * self.d) + 1 / (21 * self.d ** 2)) self.ds = 2 - (2 / self.d) def init_dynamic_parameters(self) -> None: - '''Initialization function of parameters that represent the internal - state of the CMAES algorithm, and are dynamic. 
Examples of such parameters - are the Covariance matrix C and its eigenvectors and the learning rate sigma. - ''' - + """Initialization function of parameters that represent the dynamic state of the CMA-ES. + + Examples of such parameters are the Covariance matrix C and its + eigenvectors and the learning rate sigma. + """ self.sigma = self.init_sigma self.m = np.random.rand(self.d, 1) self.dm = np.zeros(self.d) @@ -458,95 +458,103 @@ def init_dynamic_parameters(self) -> None: self.rank_tpa = None def adapt_sigma(self) -> None: - '''Method to adapt the step size sigma. There are three variants in - the methodology, namely: + """Method to adapt the step size sigma. + + There are three variants in implemented here, namely: ~ Two-Point Stepsize Adaptation (tpa) ~ Median Success Rule (msr) ~ Cummulative Stepsize Adapatation (csa) - One of these methods can be selected by setting the step_size_adaptation - parameter. - ''' - - if self.step_size_adaptation == 'tpa' and self.old_population: + One of these methods can be selected by setting the step_size_adaptation + parameter. + """ + if self.step_size_adaptation == "tpa" and self.old_population: self.s = ((1 - self.cs) * self.s) + (self.cs * self.rank_tpa) self.sigma *= np.exp(self.s) - elif self.step_size_adaptation == 'msr' and self.old_population: - k_succ = (self.population.f < np.median( - self.old_population.f)).sum() + elif self.step_size_adaptation == "msr" and self.old_population: + k_succ = (self.population.f < np.median(self.old_population.f)).sum() z = (2 / self.lambda_) * (k_succ - ((self.lambda_ + 1) / 2)) self.s = ((1 - self.cs) * self.s) + (self.cs * z) self.sigma *= np.exp(self.s / self.ds) else: self.sigma *= np.exp( - (self.cs / self.damps) * - ((np.linalg.norm(self.ps) / self.chiN) - 1) + (self.cs / self.damps) * ((np.linalg.norm(self.ps) / self.chiN) - 1) ) def adapt_covariance_matrix(self) -> None: - '''Method for adapting the covariance matrix. If the option `active` - is specified, active update of the covariance matrix is performed, using - negative weights. ''' + """Method for adapting the covariance matrix. + + If the option `active` is specified, active update of the covariance + matrix is performed, using negative weights. 
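The three step-size rules described above all reduce to a one-line multiplicative update of sigma; a standalone sketch of the default CSA branch with illustrative values (ps is a stand-in for the conjugate evolution path):

import numpy as np

d = 5
cs, damps = 0.3, 1.3                                       # illustrative learning rate and damping
chiN = d ** 0.5 * (1 - 1 / (4 * d) + 1 / (21 * d ** 2))    # expected length of an N(0, I) sample
ps = np.random.randn(d, 1)                                 # conjugate evolution path (stand-in)
sigma = 1.0
# grow sigma when the path is longer than expected under pure random selection, shrink otherwise
sigma *= np.exp((cs / damps) * (np.linalg.norm(ps) / chiN - 1))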
+ """ hs = ( - np.linalg.norm(self.ps) / - np.sqrt(1 - np.power(1 - self.cs, 2 * - (self.used_budget / self.lambda_))) + np.linalg.norm(self.ps) + / np.sqrt(1 - np.power(1 - self.cs, 2 * (self.used_budget / self.lambda_))) ) < (1.4 + (2 / (self.d + 1))) * self.chiN dhs = (1 - hs) * self.cc * (2 - self.cc) - self.pc = (1 - self.cc) * self.pc + (hs * np.sqrt( - self.cc * (2 - self.cc) * self.mueff - )) * self.dm + self.pc = (1 - self.cc) * self.pc + ( + hs * np.sqrt(self.cc * (2 - self.cc) * self.mueff) + ) * self.dm - rank_one = (self.c1 * self.pc * self.pc.T) - old_C = (1 - (self.c1 * dhs) - self.c1 - - (self.cmu * self.pweights.sum())) * self.C + rank_one = self.c1 * self.pc * self.pc.T + old_C = ( + 1 - (self.c1 * dhs) - self.c1 - (self.cmu * self.pweights.sum()) + ) * self.C if self.active: weights = self.weights[::].copy() - weights = weights[:self.population.y.shape[1]] + weights = weights[: self.population.y.shape[1]] weights[weights < 0] = weights[weights < 0] * ( - self.d / - np.power(np.linalg.norm( - self.invC @ self.population.y[:, weights < 0], axis=0), 2) + self.d / np.power( + np.linalg.norm( + self.invC @ self.population.y[:, weights < 0], axis=0 + ), 2 + ) ) - rank_mu = self.cmu * \ - (weights * self.population.y @ self.population.y.T) + rank_mu = self.cmu * (weights * self.population.y @ self.population.y.T) else: - rank_mu = (self.cmu * - (self.pweights * self.population.y[:, :self.mu] @ - self.population.y[:, :self.mu].T)) + rank_mu = self.cmu * ( + self.pweights + * self.population.y[:, : self.mu] + @ self.population.y[:, : self.mu].T + ) self.C = old_C + rank_one + rank_mu def perform_eigendecomposition(self) -> None: - '''Method to perform eigendecomposition + """Method to perform eigendecomposition. + If sigma or the coveriance matrix has degenerated, the dynamic parameters are reset. - ''' - if np.isinf(self.C).any() or np.isnan(self.C).any() or (not 1e-16 < self.sigma < 1e6): + """ + if ( + np.isinf(self.C).any() + or np.isnan(self.C).any() + or (not 1e-16 < self.sigma < 1e6) + ): self.init_dynamic_parameters() else: self.C = np.triu(self.C) + np.triu(self.C, 1).T - self.D, self.B = np.linalg.eigh(self.C) + self.D, self.B = linalg.eigh(self.C) self.D = np.sqrt(self.D.astype(complex).reshape(-1, 1)).real self.invC = np.dot(self.B, self.D ** -1 * self.B.T) def adapt(self) -> None: - '''Method for adapting the internal state paramters. - The conjugate evolution path ps is calculated, in addition to - the difference in mean x values dm. Thereafter, sigma is adapated, - followed by the adapatation of the covariance matrix. - ''' + """Method for adapting the internal state parameters. + The conjugate evolution path ps is calculated, in addition to + the difference in mean x values dm. Thereafter, sigma is adapated, + followed by the adapatation of the covariance matrix. + TODO: eigendecomp is not neccesary to be beformed every iteration, says CMAES tut. + """ self.dm = (self.m - self.m_old) / self.sigma - self.ps = ((1 - self.cs) * self.ps + (np.sqrt( - self.cs * (2 - self.cs) * self.mueff - ) * self.invC @ self.dm) * self.ps_factor) + self.ps = (1 - self.cs) * self.ps + ( + np.sqrt(self.cs * (2 - self.cs) * self.mueff) * self.invC @ self.dm + ) * self.ps_factor self.adapt_sigma() self.adapt_covariance_matrix() - # TODO: eigendecomp is not neccesary to be beformed every iteration, says CMAES tut. 
self.perform_eigendecomposition() self.record_statistics() self.calculate_termination_criteria() @@ -555,17 +563,13 @@ def adapt(self) -> None: self.perform_local_restart() def perform_local_restart(self) -> None: - '''Method performing local restart, given that a restart - strategy is specified in the parameters. - ~ IPOP: after every restart, `lambda_` is multiplied with a factor. - ''' - + """Method performing local restart, if a restart strategy is specified.""" if self.local_restart: - if self.local_restart == 'IPOP': + if self.local_restart == "IPOP": self.mu *= self.ipop_factor self.lambda_ *= self.ipop_factor - elif self.local_restart == 'BIPOP': + elif self.local_restart == "BIPOP": self.bipop_parameters.adapt(self.used_budget) self.sigma = self.bipop_parameters.sigma self.lambda_ = self.bipop_parameters.lambda_ @@ -577,60 +581,68 @@ def perform_local_restart(self) -> None: self.init_local_restart_parameters() self.restarts.append(self.t) else: - warnings.warn("Termination criteria met: {}".format(", ".join( - name for name, value in self.termination_criteria.items() if value - )), RuntimeWarning) + warnings.warn( + "Termination criteria met: {}".format( + ", ".join( + name + for name, value in self.termination_criteria.items() + if value + ) + ), + RuntimeWarning, + ) @property def threshold(self) -> None: - '''Calculate threshold for mutation, used in threshold convergence.''' - return self.init_threshold * self.diameter * ( - (self.budget - self.used_budget) / self.budget - ) ** self.decay_factor + """Calculate threshold for mutation, used in threshold convergence.""" + return ( + self.init_threshold + * self.diameter + * ((self.budget - self.used_budget) / self.budget) ** self.decay_factor + ) @property def last_restart(self): - '''Returns the last index of self.restarts''' + """Return the last index of self.restarts.""" return self.restarts[-1] @staticmethod - def from_config_array(d:int, config_array: list) -> 'Parameters': - '''Instantiates a Parameters object from a configuration array + def from_config_array(d: int, config_array: list) -> "Parameters": + """Instantiate a Parameters object from a configuration array. Parameters ---------- d: int The dimensionality of the problem - + config_array: list - A list of length len(Parameters.__modules__), + A list of length len(Parameters.__modules__), containing ints from 0 to 2 Returns ------- A new Parameters instance - ''' + + """ if not len(config_array) == len(Parameters.__modules__): raise AttributeError( - "config_array must be of length " + - str(len(Parameters.__modules__)) - ) + "config_array must be of length " + str(len(Parameters.__modules__)) + ) parameters = dict() for name, cidx in zip(Parameters.__modules__, config_array): - options = getattr(getattr(Parameters, name), - "options", [False, True]) + options = getattr(getattr(Parameters, name), "options", [False, True]) if not len(options) > cidx: raise AttributeError( - f"id: {cidx} is invalid for {name} " - f"with options {', '.join(map(str, options))}" - ) + f"id: {cidx} is invalid for {name} " + f"with options {', '.join(map(str, options))}" + ) parameters[name] = options[cidx] return Parameters(d, **parameters) @staticmethod - def load(filename: str) -> 'Parameters': - '''Loads stored parameter objects from pickle - + def load(filename: str) -> "Parameters": + """Load stored parameter objects from pickle. 
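The configuration-array constructor above takes one integer per module; a brief usage example (the all-zeros array selects every module's default option):

from modcma import Parameters

config = [0] * len(Parameters.__modules__)        # one index per module; 0 picks the default option
params = Parameters.from_config_array(5, config)  # 5 is the problem dimensionality
# an out-of-range index raises AttributeError, e.g. Parameters.from_config_array(5, [99] * len(config))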
+ Parameters ---------- filename: str @@ -639,39 +651,37 @@ def load(filename: str) -> 'Parameters': Returns ------- A Parameters object - - ''' + + """ if not os.path.isfile(filename): raise OSError(f"{filename} does not exist") - + with open(filename, "rb") as f: parameters = pickle.load(f) if not isinstance(parameters, Parameters): raise AttributeError( - f"{filename} does not contain " - "a Parameters object" - ) + f"{filename} does not contain " "a Parameters object" + ) parameters.sampler = parameters.get_sampler() return parameters - def save(self, filename:str='parameters.pkl') -> None: - '''Saves a parameters object to pickle - + def save(self, filename: str = "parameters.pkl") -> None: + """Save a parameters object to pickle. + Parameters ---------- filename: str The name of the file to save to. - ''' - with open(filename, 'wb') as f: + + """ + with open(filename, "wb") as f: self.sampler = None pickle.dump(self, f) def record_statistics(self) -> None: - 'Method for recording metadata. ' + """Method for recording metadata.""" self.flat_fitnesses.append( - self.population.f[0] == self.population.f[ - self.flat_fitness_index - ] + self.population.f[0] == self.population.f[self.flat_fitness_index] ) self.t += 1 self.sigma_over_time.append(self.sigma) @@ -680,78 +690,84 @@ def record_statistics(self) -> None: self.median_fitnesses.append(np.median(self.population.f)) def calculate_termination_criteria(self) -> None: - '''Methods for computing restart criteria - Only computes when a local restart - strategy is specified, or when explicitly told to - to so, i.e.: self.compute_termination_criteria = True - ''' + """Method for computing restart criteria. + + Only computes when a local restart strategy is specified, or when explicitly + told to do so, i.e.: self.compute_termination_criteria = True + """ if self.local_restart or self.compute_termination_criteria: - _t = (self.t % self.d) + _t = self.t % self.d diag_C = np.diag(self.C.T) d_sigma = self.sigma / self.init_sigma - best_fopts = self.best_fitnesses[self.last_restart:] - median_fitnesses = self.median_fitnesses[self.last_restart:] - - self.termination_criteria = dict() if self.lambda_ > self.max_lambda_ else { - "max_iter": ( - self.t - self.last_restart > self.max_iter - ), - "equalfunvalues": ( - len(best_fopts) > self.nbin and - np.ptp(best_fopts[-self.nbin:]) == 0 - ), - "flat_fitness": ( - self.t - self.last_restart > self.flat_fitnesses.maxlen and - len(self.flat_fitnesses) == self.flat_fitnesses.maxlen and - np.sum(self.flat_fitnesses) > (self.d / 3) - ), - "tolx": np.all(( - np.append(self.pc.T, diag_C) - * d_sigma) < (self.tolx * self.init_sigma) - ), - "tolupsigma": ( - d_sigma > self.tolup_sigma * np.sqrt(self.D.max()) - ), - "conditioncov": np.linalg.cond(self.C) > self.condition_cov, - "noeffectaxis": np.all((1 * self.sigma * np.sqrt( - self.D[_t, 0]) * self.B[:, _t] + self.m) == self.m - ), - "noeffectcoor": np.any( - (.2 * self.sigma * np.sqrt(diag_C) + self.m) == self.m - ), - "stagnation": ( - self.t - self.last_restart > self.n_stagnation and ( - np.median(best_fopts[-int(.3 * self.t):]) >= - np.median(best_fopts[:int(.3 * self.t)]) and - np.median(median_fitnesses[-int(.3 * self.t):]) >= - np.median(median_fitnesses[:int(.3 * self.t)]) - ) - ) - } + best_fopts = self.best_fitnesses[self.last_restart :] + median_fitnesses = self.median_fitnesses[self.last_restart :] + + self.termination_criteria = ( + dict() + if self.lambda_ > self.max_lambda_ + else { + "max_iter": (self.t - self.last_restart > self.max_iter), 
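A round-trip through the pickle helpers above looks as follows; note that save drops the (unpicklable) sampler generator and load re-creates it:

from modcma import Parameters

params = Parameters(5)
params.save("parameters.pkl")                 # sets self.sampler to None before pickling
restored = Parameters.load("parameters.pkl")  # re-attaches a fresh sampler via get_sampler()
assert isinstance(restored, Parameters)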
+ "equalfunvalues": ( + len(best_fopts) > self.nbin + and np.ptp(best_fopts[-self.nbin :]) == 0 + ), + "flat_fitness": ( + self.t - self.last_restart > self.flat_fitnesses.maxlen + and len(self.flat_fitnesses) == self.flat_fitnesses.maxlen + and np.sum(self.flat_fitnesses) > (self.d / 3) + ), + "tolx": np.all( + (np.append(self.pc.T, diag_C) * d_sigma) + < (self.tolx * self.init_sigma) + ), + "tolupsigma": (d_sigma > self.tolup_sigma * np.sqrt(self.D.max())), + "conditioncov": np.linalg.cond(self.C) > self.condition_cov, + "noeffectaxis": np.all( + ( + 1 * self.sigma * np.sqrt(self.D[_t, 0]) * self.B[:, _t] + + self.m + ) + == self.m + ), + "noeffectcoor": np.any( + (0.2 * self.sigma * np.sqrt(diag_C) + self.m) == self.m + ), + "stagnation": ( + self.t - self.last_restart > self.n_stagnation + and ( + np.median(best_fopts[-int(0.3 * self.t) :]) + >= np.median(best_fopts[: int(0.3 * self.t)]) + and np.median(median_fitnesses[-int(0.3 * self.t) :]) + >= np.median(median_fitnesses[: int(0.3 * self.t)]) + ) + ), + } + ) def update(self, parameters: dict, reset_default_modules=False): - '''Method to update the values of the Parameters object - based on a given dict of new parameters. + """Method to update the values of self based on a given dict of new parameters. Note that some updated parameters might be overridden by: self.init_selection_parameters() self.init_adaptation_parameters() self.init_local_restart_parameters() which are called at the end of this function. Use with caution. - + Parameters ---------- parameters: dict A dict with new parameter values - + reset_default_modules: bool = False - Whether to reset the modules back to their default values. - ''' + Whether to reset the modules back to their default values. + + """ if reset_default_modules: for name in Parameters.__modules__: - default_option, *_ = getattr(getattr(Parameters, name), - "options", [False, True]) + default_option, *_ = getattr( + getattr(Parameters, name), "options", [False, True] + ) setattr(self, name, default_option) for name, value in parameters.items(): @@ -764,55 +780,51 @@ def update(self, parameters: dict, reset_default_modules=False): self.init_local_restart_parameters() - class BIPOPParameters(AnnotatedStruct): - 'Seperate object which holds BIPOP specific parameters' + """Object which holds BIPOP specific parameters.""" lambda_init: int budget: int - mu_factor: float + mu_factor: float lambda_large: int = None budget_small: int = None budget_large: int = None used_budget: int = 0 - @property def large(self) -> bool: - 'Deternotes where to use a large regime or small regime' + """Determine where to use a large regime.""" if (self.budget_large >= self.budget_small) and self.budget_large > 0: - return True + return True return False @property def remaining_budget(self) -> int: - 'Compute the remaining budget' + """Compute the remaining budget.""" return self.budget - self.used_budget @property def lambda_(self) -> int: - 'Returns value for lambda, based which regime is active' + """Return value for lambda, based which regime is active.""" return self.lambda_large if self.large else self.lambda_small @property def sigma(self) -> float: - 'Return value for sigma, based on which regime is active' - return 2 if self.large else 2e-2 * np.random.random() - + """Return value for sigma, based on which regime is active.""" + return 2 if self.large else 2e-2 * np.random.uniform() + @property def mu(self) -> int: - 'Return value for mu, based on which ' + """Return value for mu.""" return np.floor(self.lambda_ * 
self.mu_factor).astype(int) - def adapt(self, used_budget: int) -> None: - 'Adapts the parameters for BIPOP on restart' - + """Adapt the parameters for BIPOP on restart.""" used_previous_iteration = used_budget - self.used_budget self.used_budget += used_previous_iteration - if self.lambda_large == None: - self.lambda_large = self.lambda_init * 2 + if self.lambda_large is None: + self.lambda_large = self.lambda_init * 2 self.budget_small = self.remaining_budget // 2 self.budget_large = self.remaining_budget - self.budget_small elif self.large: @@ -820,8 +832,11 @@ def adapt(self, used_budget: int) -> None: self.lambda_large *= 2 else: self.budget_small -= used_previous_iteration + + self.lambda_small = np.floor( + self.lambda_init + * (0.5 * self.lambda_large / self.lambda_init) ** (np.random.uniform() ** 2) + ).astype(int) - self.lambda_small = np.floor(self.lambda_init * ( - .5 * self.lambda_large / self.lambda_init - ) ** (np.random.random() ** 2) - ).astype(int) + if self.lambda_small % 2 != 0: + self.lambda_small += 1 \ No newline at end of file diff --git a/modcma/population.py b/modcma/population.py index 980a85a..b22ace7 100644 --- a/modcma/population.py +++ b/modcma/population.py @@ -1,41 +1,39 @@ -import itertools +"""TImplemention for the Population object used in the ModularCMA-ES.""" from typing import Any import numpy as np -from .utils import AnnotatedStruct +class Population: + """Object for holding a Population of individuals.""" -class Population(AnnotatedStruct): - '''AnnotatedStruct object for holding a Population of individuals. ''' - x: np.ndarray - y: np.ndarray - f: np.ndarray - - def __init__(self, *args, **kwargs): - 'Reshapes x and y' - super().__init__(*args, **kwargs) + def __init__(self, x, y, f): + """Reshape x and y.""" + self.x = x + self.y = y + self.f = f if len(self.x.shape) == 1: self.x = self.x.reshape(-1, 1) self.y = self.y.reshape(-1, 1) def sort(self) -> None: - '''Sorts the population according to their fitness values''' + """Sort the population according to their fitness values.""" rank = np.argsort(self.f) self.x = self.x[:, rank] self.y = self.y[:, rank] self.f = self.f[rank] def copy(self) -> "Population": - '''Returns a new population object, with it's variables copied + """Return a new population object, with it's variables copied. Returns - ------ + ------- Population - ''' - return Population(self.x, self.y, self.f) + + """ + return Population(self.x.copy(), self.y.copy(), self.f.copy()) def __add__(self, other: "Population") -> "Population": - '''Adds two population objects with each other + """Add two population objects with each other. Parameters ---------- @@ -43,68 +41,72 @@ def __add__(self, other: "Population") -> "Population": another population which is to be used to perform the addition Returns - ------ + ------- Population - ''' + """ if not isinstance(other, self.__class__): raise TypeError( - f"Other should be {self.__class__}" - f"got {other.__class__}" + f"Other should be {self.__class__}" f"got {other.__class__}" ) return Population( np.hstack([self.x, other.x]), np.hstack([self.y, other.y]), - np.append(self.f, other.f) + np.append(self.f, other.f), ) def __getitem__(self, key: Any) -> "Population": - '''Custom implemenation of the getitem method, allowing - for indexing the entire population object as if it were a np.ndarray - + """Method allowing for indexing the population object as if it were an np.ndarray. 
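Stepping back to the BIPOPParameters object defined above, its regime bookkeeping can be exercised directly. A small sketch with illustrative numbers (the positional constructor is generated by AnnotatedStruct):

from modcma import BIPOPParameters

bipop = BIPOPParameters(8, 10_000, 0.5)   # lambda_init, budget, mu_factor
bipop.adapt(used_budget=800)              # called after a restart with the budget spent so far
# the large regime doubles lambda each time; the small regime draws a random smaller lambda
print(bipop.large, bipop.lambda_, bipop.mu, bipop.sigma)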
+ Parameters ---------- key: int, [int], itertools.slice value by with to index the population Returns - ------ + ------- Population - ''' + """ if isinstance(key, int): return Population( self.x[:, key].reshape(-1, 1), self.y[:, key].reshape(-1, 1), - np.array([self.f[key]]) + np.array([self.f[key]]), ) - elif isinstance(key, slice): + if isinstance(key, slice): return Population( - self.x[:, key.start: key.stop: key.step], - self.y[:, key.start: key.stop: key.step], - self.f[key.start: key.stop: key.step] + self.x[:, key.start : key.stop : key.step], + self.y[:, key.start : key.stop : key.step], + self.f[key.start : key.stop : key.step], ) - elif isinstance(key, list) and all( - map(lambda x: isinstance(x, int) and x >= 0, key)): - return Population( - self.x[:, key], - self.y[:, key], - self.f[key] + if isinstance(key, list) and all( + map(lambda x: isinstance(x, int) and x >= 0, key) + ): + return Population(self.x[:, key], self.y[:, key], self.f[key]) + + raise KeyError( + "Key must be (list of non-negative) integer(s) or slice, not {}".format( + type(key) ) - else: - raise KeyError("Key must be (list of non-negative) integer(s) or slice, not {}" - .format(type(key))) + ) @property def n(self) -> int: + """The number of individuals in the population.""" return len(self.f) @property def d(self) -> int: + """The dimension of the individuals in the population.""" shape_ = list(self.x.shape) shape_.remove(self.n) return shape_[0] def __repr__(self) -> str: + """Representation of Population object.""" return f"" - + + def __str__(self) -> str: + """String representation of Population object.""" + return repr(self) diff --git a/modcma/sampling.py b/modcma/sampling.py index 2d32788..2229f53 100644 --- a/modcma/sampling.py +++ b/modcma/sampling.py @@ -1,12 +1,15 @@ +"""Module implementing various samplers.""" +import itertools from typing import Generator +from collections.abc import Iterator + import numpy as np -from scipy import stats, linalg -from sobol_seq import i4_sobol -from ghalton import Halton +from scipy import stats +from numba import vectorize, float64, int64 def gaussian_sampling(d: int) -> Generator[np.ndarray, None, None]: - '''Generator yielding random normal (gaussian) samples. + """Generator yielding random normal (gaussian) samples. Parameters ---------- @@ -16,13 +19,14 @@ def gaussian_sampling(d: int) -> Generator[np.ndarray, None, None]: Yields ------ numpy.ndarray - ''' + + """ while True: yield np.random.randn(d, 1) def sobol_sampling(d: int) -> Generator[np.ndarray, None, None]: - '''Generator yielding samples from a Sobol sequence + """Generator yielding samples from a Sobol sequence. Parameters ---------- @@ -32,15 +36,15 @@ def sobol_sampling(d: int) -> Generator[np.ndarray, None, None]: Yields ------ numpy.ndarray - ''' - seed = np.random.randint(2, max(3, d**2)) + + """ + sobol = Sobol(d, np.random.randint(2, max(3, d ** 2))) while True: - sample, seed = i4_sobol(d, max(seed, 2)) - yield stats.norm.ppf(sample).reshape(-1, 1) + yield stats.norm.ppf(next(sobol)).reshape(-1, 1) def halton_sampling(d: int) -> Generator[np.ndarray, None, None]: - '''Generator yielding samples from a Halton sequence + """Generator yielding samples from a Halton sequence. 
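The Population container above is plain NumPy underneath; a short usage sketch:

import numpy as np
from modcma import Population

d, n = 2, 4
x = np.random.randn(d, n)                 # one column per individual
pop = Population(x, x.copy(), np.random.randn(n))
pop.sort()                                # ascending by fitness
best_two = pop[:2]                        # slicing returns a new Population
merged = pop + pop.copy()                 # concatenation along the population axis
print(best_two.n, merged.n)               # -> 2 8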
Parameters ---------- @@ -50,14 +54,16 @@ def halton_sampling(d: int) -> Generator[np.ndarray, None, None]: Yields ------ numpy.ndarray - ''' + + """ halton = Halton(d) while True: - yield stats.norm.ppf(halton.get(1)[0]).reshape(-1, 1) + yield stats.norm.ppf(next(halton)).reshape(-1, 1) def mirrored_sampling(sampler: Generator) -> Generator[np.ndarray, None, None]: - '''Generator yielding mirrored samples. + """Generator yielding mirrored samples. + For every sample from the input sampler (generator), both its original and complemented form are yielded. @@ -69,14 +75,18 @@ def mirrored_sampling(sampler: Generator) -> Generator[np.ndarray, None, None]: Yields ------ numpy.ndarray - ''' + + """ for sample in sampler: yield sample yield sample * -1 -def orthogonal_sampling(sampler: Generator, n_samples: int) -> Generator[np.ndarray, None, None]: - '''Generator yielding orthogonal samples. +def orthogonal_sampling( + sampler: Generator, n_samples: int +) -> Generator[np.ndarray, None, None]: + """Generator yielding orthogonal samples. + This function orthogonalizes , and succesively yields each of them. It uses the linalg.orth decomposition function of the scipy library. @@ -90,7 +100,8 @@ def orthogonal_sampling(sampler: Generator, n_samples: int) -> Generator[np.ndar Yields ------ numpy.ndarray - ''' + + """ samples = [] for sample in sampler: samples.append(sample) @@ -101,4 +112,169 @@ def orthogonal_sampling(sampler: Generator, n_samples: int) -> Generator[np.ndar samples = [s.reshape(-1, 1) for s in (Q.T * L).T] for _ in range(n_samples): yield samples.pop() - + + +class Halton(Iterator): + """Iterator implementing Halton Quasi random sequences. + + Attributes + ---------- + d: int + dimension + bases: np.ndarray + array of primes + index: itertools.count + current index + + """ + + def __init__(self, d, start=1): + """Compute the bases, and set index to start.""" + self.d = d + self.bases = self.get_primes(self.d) + self.index = itertools.count(start) + + @staticmethod + def get_primes(n: int) -> np.ndarray: + """Return n primes, starting from 0.""" + def inner(n_): + sieve = np.ones(n_ // 3 + (n_ % 6 == 2), dtype=np.bool) + for i in range(1, int(n_ ** 0.5) // 3 + 1): + if sieve[i]: + k = 3 * i + 1 | 1 + sieve[k * k // 3 :: 2 * k] = False + sieve[k * (k - 2 * (i & 1) + 4) // 3 :: 2 * k] = False + return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)] + + primes = inner(max(6, n)) + while len(primes) < n: + primes = inner(len(primes) ** 2) + return primes[:n] + + def __next__(self) -> np.ndarray: + """Return next Halton sequence.""" + return self.vectorized_next(next(self.index), self.bases) + + @staticmethod + @vectorize([float64(int64, int64)]) + def vectorized_next(index: int, base: int) -> float: + """Vectorized method for computing halton sequence.""" + d, x = 1, 0 + while index > 0: + index, remainder = divmod(index, base) + d *= base + x += remainder / d + return x + + +class Sobol(Iterator): + """Iterator implementing Sobol Quasi random sequences. + + This is an iterator version of the version implemented in the python + package: sobol-seq==0.2.0. This version is 4x faster due to better usage of + numpy vectorization. 
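All samplers above follow the same generator protocol and can therefore be composed freely; for example, mirroring a Halton-based sampler:

from modcma.sampling import halton_sampling, mirrored_sampling

sampler = mirrored_sampling(halton_sampling(5))
s = next(sampler)           # quasi-random normal sample of shape (5, 1)
s_mirror = next(sampler)    # its negation, equal to -s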
+ + Attributes + ---------- + d: int + dimension + seed: int + sample seed + v: np.ndarray + array of sample directions + recipd: int + 1/(common denominator of the elements in v) + lastq: np.ndarray + vector containing last sample directions + + """ + + def __init__(self, d: int, seed: int = 0): + """Intialize the v matrix, used for generating Sobol sequences. + + The values for v and poly were taken from the python package sobol-seq. + """ + self.d = d + self.seed = np.floor(max(0, seed)).astype(int) + self.v = np.zeros((40, 30), dtype=int) + + self.v[0:40, 0] = np.ones(40) + self.v[2:40, 1] = np.r_[ + np.tile([1, 3], 3), + np.tile(np.r_[np.tile([3, 1], 4), np.tile([1, 3], 4)], 2), + ] + self.v[3:40, 2] = [ + 7,5,1,3,3,7,5,5,7,7,1,3,3,7,5,1,1,5,3,3,1,7,5,1,3,3,7,5,1,1,5,7,7,5, + 1,3,3 + ] + self.v[5:40, 3] = [ + 1,7,9,13,11,1,3,7,9,5,13,13,11,3,15,5,3,15,7,9,13,9,1,11,7,5,15,1, + 15,11,5,3,1,7,9 + ] + self.v[7:40, 4] = [ + 9,3,27,15,29,21,23,19,11,25,7,13,17,1,25,29,3,31,11, 5,23,27,19,21, + 5,1,17,13,7,15,9,31,9 + ] + self.v[13:40, 5] = [ + 37,33,7,5,11,39,63,27,17,15,23,29,3,21,13,31,25,9,49,33,19,29,11,19, + 27,15,25 + ] + self.v[19:40, 6] = [ + 13,33,115,41,79,17,29,119,75,73,105,7,59,65,21,3,113,61,89,45,107 + ] + self.v[37:40, 7] = [7, 23, 39] + poly = [ + 1,3,7,11,13,19,25,37,59,47,61,55,41,67,97,91,109,103,115,131,193,137, + 145,143,241,157,185,167,229,171,213,191,253,203,211,239,247,285,369,299 + ] + + # Find the number of bits in ATMOST. + maxcol = Sobol.h1(2 ** 30 - 1) + + # Initialize row 1 of V. + self.v[0, :maxcol] = 1 + + for i in range(2, self.d + 1): + j = poly[i - 1] + m = int(np.log2(j)) + includ = np.fromiter(format(j, "b")[1:], dtype=np.int) + powers = 2 ** np.arange(1, m + 1) + + for j in range(m + 1, maxcol + 1): + mask = np.arange(j - 1)[::-1][:m] + self.v[i - 1, j - 1] = np.bitwise_xor.reduce( + np.r_[ + self.v[i - 1, j - m - 1], powers * self.v[i - 1, mask] * includ + ] + ) + + i = np.arange(maxcol - 1)[::-1] + powers = 2 ** np.arange(1, len(i) + 1) + self.v[: self.d, i] = self.v[: self.d, i] * powers + + self.recipd = 1.0 / (2 * powers[-1]) + self.lastq = np.zeros(self.d, dtype=int) + + for loc in map(self.l0, range(self.seed)): + self.lastq = np.bitwise_xor(self.lastq, self.v[: self.d, loc - 1]) + + def __next__(self) -> np.ndarray: + """Return next Sobol sequence.""" + loc = self.l0(self.seed) + quasi = self.lastq * self.recipd + self.lastq = np.bitwise_xor(self.lastq, self.v[: self.d, loc - 1]) + self.seed += 1 + return quasi + + @staticmethod + def h1(n: int) -> int: + """Return high 1 bit index for a given integer.""" + return len(format(n, "b")) - abs(format(n, "b").find("1")) + + @staticmethod + def l0(n: int) -> int: + """Return low 0 bit index for a given integer.""" + x = format(n, "b")[::-1].find("0") + if x != -1: + return x + 1 + return len(format(n, "b")) + 1 diff --git a/modcma/utils.py b/modcma/utils.py index 22b6cfd..bc8bdc0 100644 --- a/modcma/utils.py +++ b/modcma/utils.py @@ -1,103 +1,107 @@ +"""Implementation of various utilities used in ModularCMA-ES package.""" + import warnings import typing -from collections import OrderedDict from inspect import Signature, Parameter, getmodule from functools import wraps from time import time + import numpy as np + class Descriptor: - '''Data descriptor''' + """Data descriptor.""" def __set_name__(self, owner, name): - '''Set name attribute ''' + """Set name attribute.""" self.name = name def __set__(self, instance, value): - 'Set value on instance' + """Set value on instance.""" 
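To make the bit-level helpers of the Sobol iterator above concrete: h1 returns the (1-based) index of the highest one bit and l0 the index of the lowest zero bit, which selects the direction number used to update lastq. A quick check, with an assumed small dimension and seed:

from modcma.sampling import Sobol

assert Sobol.h1(2 ** 30 - 1) == 30        # number of bits available for direction numbers
assert Sobol.l0(0) == 1 and Sobol.l0(3) == 3
sob = Sobol(3, seed=2)                    # 3-dimensional sequence, skipping the first two points
u = next(sob)                             # one point in the unit cube, later mapped through norm.ppf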
instance.__dict__[self.name] = value def __delete__(self, instance): - '''Delete attribute from the instance __dict__''' + """Delete attribute from the instance __dict__.""" del instance.__dict__[self.name] class InstanceOf(Descriptor): - '''Data descriptor checks for correct types. ''' + """Data descriptor checks for correct types.""" def __init__(self, dtype): + """Set dtype.""" self.dtype = dtype - self.__doc__ += "Type: {}".format(self.dtype) def __set__(self, instance, value): + """Set the value of instance to value, checks type of argument. + + Raises + ------ + TypeError + If type of the argument does not match self.dtype + + """ if type(value) != type(None): - if type(value) != self.dtype and not ( - isinstance(value, np.generic) and type( - value.item()) == self.dtype)\ - and str(self.dtype)[1:] != value.__class__.__name__: - # we should find another way for the last statement - raise TypeError("{} should be {} got type {}: {}".format( - self.name, self.dtype, - type(value), str(value)[:50] - )) - if hasattr(value, '__copy__'): + + if ( + type(value) != self.dtype + and not ( + isinstance(value, np.generic) and type(value.item()) == self.dtype + ) + and str(self.dtype)[1:] != value.__class__.__name__ + ): + raise TypeError( + "{} should be {} got type {}: {}".format( + self.name, self.dtype, type(value), str(value)[:50] + ) + ) + if hasattr(value, "__copy__"): value = value.copy() super().__set__(instance, value) + class AnyOf(Descriptor): - '''Descriptor, checks of value is Any of a specified sequence of options. ''' + """Descriptor, checks of value is Any of a specified sequence of options.""" def __init__(self, options=None): + """Set options.""" self.options = options - self.__doc__ += "Options: [{}]".format( - ', '.join(map(str, self.options)) - ) def __set__(self, instance, value): + """Set the value of instance to value, checks value of argument to match self.options. + + Raises + ------ + TypeError + If type of the argument does not match self.dtype + + """ if value not in self.options: - raise ValueError("{} should be any of {}".format( - self.name, self.options - )) + raise ValueError( + "{} should be any of [{}]. Got: {}".format( + self.name, self.options, value) + ) super().__set__(instance, value) + class AnnotatedStructMeta(type): - '''Metaclass for class for AnnotatedStruct. + """Metaclass for class for AnnotatedStruct. - Wraps all parameters defined in the class body with - __annotations__ into a signature. It additionally wraps each - parameter into a descriptor using __annotations__, - allowing for type checking. + Wraps all parameters defined in the class body with + __annotations__ into a signature. It additionally wraps each + parameter into a descriptor using __annotations__, + allowing for type checking. Currently, only two types of descriptors are implementated, InstanceOf and typing.AnyOf, the first implements simple type validation, the latter implements validation though the use of sequence of - allowed values. - ''' - - @classmethod - def __prepare__(cls: typing.Any, name: str, bases: tuple) -> OrderedDict: - '''Normally, __prepare__ returns an empty dictionairy, - now an OrderedDict is returned. This allowes for ordering - the parameters (*args). - - Parameters - ---------- - cls: typing.Any - The empty body of the class to be instantiated - name: str - The name of the cls - bases: tuple - The base classes of the cls - - Returns - ------- - OrderedDict - ''' - return OrderedDict() + allowed values. 
+ """ def __new__(cls: typing.Any, name: str, bases: tuple, attrs: dict) -> typing.Any: - '''Controls instance creation of classes that have AnnotatedStructMeta as metaclass - All cls attributes that are defined in __annotations__ are wrapped - into either an typing.AnyOf or an InstanceOf descriptor, depending on + """Control instance creation of classes that have AnnotatedStructMeta as metaclass. + + All cls attributes that are defined in __annotations__ are wrapped + into either an typing.AnyOf or an InstanceOf descriptor, depending on the type of the annotation. If the annotation is a sequence, the first element is used as a default value. @@ -108,47 +112,57 @@ def __new__(cls: typing.Any, name: str, bases: tuple, attrs: dict) -> typing.Any name: str The name of the cls bases: dict - The base classes of the cls + The base classes of the cls attrs: dict The attributes of the cls Returns ------- A new cls object - ''' + + """ parameters = [] - for key, annotation in attrs.get('__annotations__', {}).items(): + for key, annotation in attrs.get("__annotations__", {}).items(): default_value = attrs.get(key, Parameter.empty) - - if isinstance(annotation, list) or isinstance(annotation, tuple): + + if isinstance(annotation, (list, tuple)): attrs[key] = AnyOf(annotation) else: - if not type(annotation) == type and getmodule(type(annotation)) != typing: + if ( + not type(annotation) == type + and getmodule(type(annotation)) != typing + ): raise TypeError( f"Detected wrong format for annotations of AnnotatedStruct.\n\t" f"Format should be : = \n\t" f"Got: {name}: {annotation} = {default_value}" - ) + ) attrs[key] = InstanceOf(annotation) - parameters.append(Parameter(name=key, default=default_value, - kind=Parameter.POSITIONAL_OR_KEYWORD)) + parameters.append( + Parameter( + name=key, + default=default_value, + kind=Parameter.POSITIONAL_OR_KEYWORD, + ) + ) clsobj = super().__new__(cls, name, bases, attrs) - setattr(clsobj, '__signature__', Signature(parameters=parameters)) + setattr(clsobj, "__signature__", Signature(parameters=parameters)) return clsobj + class AnnotatedStruct(metaclass=AnnotatedStructMeta): - '''Custom class for defining structs. + """Custom class for defining structs. Automatically sets parameters defined in the signature. - AnnotatedStruct objects, and children thereof, require + AnnotatedStruct objects, and children thereof, require the following structure: class Foo(AnnotatedStruct): variable_wo_default : type variable_w_default : type = value The metaclass will automatically assign a decriptor object - to every variable, allowing for type checking. + to every variable, allowing for type checking. The init function will be dynamically generated, and user specified values in the *args **kwargs, will override the defaults. 
The *args will follow the order as defined in the class body: @@ -160,32 +174,35 @@ class Foo(AnnotatedStruct): The calling signature, instantiated by the metaclass __bound__ : Signature The bound signature, bound to the *args and **kwargs - ''' + + """ def __init__(self, *args, **kwargs) -> None: + """Bind *args and **kwargs to a signature instantiated by the metaclass.""" self.__bound__ = self.__signature__.bind(*args, **kwargs) self.__bound__.apply_defaults() for name, value in self.__bound__.arguments.items(): setattr(self, name, value) def __repr__(self) -> str: + """Representation for a AnnotatedStruct object.""" return "<{}: ({})>".format( - self.__class__.__qualname__, ', '.join( + self.__class__.__qualname__, + ", ".join( "{}={}".format(name, getattr(self, name)) for name, value in self.__bound__.arguments.items() - ) + ), ) - def set_default(self, name:str, default_value: typing.Any) -> None: - 'Helper method to set default parameters' + def set_default(self, name: str, default_value: typing.Any) -> None: + """Helper method to set default parameters.""" current = getattr(self, name) if type(current) == type(None): setattr(self, name, default_value) def timeit(func): - '''Decorator function for timing the excecution of - a function. + """Decorator function for timing the excecution of a function. Parameters ---------- @@ -196,26 +213,28 @@ def timeit(func): ------- typing.Callable a wrapped function - ''' + + """ @wraps(func) def inner(*args, **kwargs): start = time() res = func(*args, **kwargs) print("Time elapsed", time() - start) return res + return inner def ert(evals, budget): - '''Computed the expected running time of - a list of evaluations. + """Computed the expected running time of a list of evaluations. Parameters ---------- evals: list a list of running times (number of evaluations) budget: int - the maximum number of evaluations + the maximum number of evaluations + Returns ------- float @@ -225,7 +244,8 @@ def ert(evals, budget): The standard deviation of the expected running time int The number of successful runs - ''' + + """ if any(evals): try: with warnings.catch_warnings(): @@ -234,6 +254,6 @@ def ert(evals, budget): n_succ = (evals < budget).sum() _ert = float(evals.sum()) / int(n_succ) return _ert, np.std(evals), n_succ - except: + except Exception: pass - return float('inf'), np.nan, 0 + return float("inf"), np.nan, 0 diff --git a/requirements.txt b/requirements.txt index ea6b00c..c4dfe5a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,10 +1,4 @@ -coverage==5.0.3 IOHexperimenter>=0.2.8 -ghalton==0.6.2 +numba>=0.52.0 numpy>=1.18.5 -scipy>=1.4.1 -sobol-seq==0.2.0 -Sphinx==3.3.0 -sphinx-automodapi==0.13 -sphinx-rtd-theme==0.5.0 -twine==3.2.0 +scipy>=1.4.1 \ No newline at end of file diff --git a/run.py b/run.py deleted file mode 100644 index 27ea933..0000000 --- a/run.py +++ /dev/null @@ -1,187 +0,0 @@ -import os - -import numpy as np -from modcma.modularcmaes import evaluate -from modcma.bbob import bbobbenchmarks - -import numpy as np -# import cma -# from inverse_covariance import QuicGraphicalLasso -from scipy.stats import special_ortho_group - - -def correlation_matrix(C): - c = C.copy() - for i in range(c.shape[0]): - fac = c[i, i]**0.5 - c[:, i] /= fac - c[i, :] /= fac - c = (c + c.T) / 2.0 - return c - - -def reset_ccov_learning_rate(es, nz): - es.opts['CMA_rankone'] = 1. - es.opts['CMA_rankmu'] = 1. 
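For reference, the expected running time computed by ert above is simply the total number of evaluations divided by the number of runs that finished below the budget; a hand check of that definition:

import numpy as np
from modcma.utils import ert

budget = 1_000
evals = np.array([120, 340, 1_000])             # the run that used the full budget counts as unsuccessful
expected_rt, std, n_succ = ert(evals, budget)
assert n_succ == 2
assert expected_rt == evals.sum() / n_succ      # (120 + 340 + 1000) / 2 = 730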
- es.sp = cma.evolution_strategy._CMAParameters(N = es.N, - opts = es.opts, - ccovfac = 1., - verbose = es.opts['verbose'] > 0) - - - - ccovfac = 1.0 - alphacov = 2.0 - mueff = es.sp.weights.mueff - - rankone_factor = 2. / (2 * (nz/es.N + 0.15) * (es.N + 1.3)**1. + - es.sp.weights.mueff) / es.sp.c1 - - # this happens in PyCMA - c1 = (1.0 * rankone_factor * ccovfac * min(1, es.sp.popsize / 6) * - 2 / ((es.N + 1.3)** 2.0 + mueff)) - - c1_ = 2 / ( - (nz + 1.3) * (es.N + 1.3) + mueff - ) - cmu_ = min(1 - c1_, ( - 2 * ( (mueff + 1 / mueff - 1.75) / ( - (nz + 2) * (es.N + 2) + mueff - ) - ) - ) - ) - - breakpoint() - - - # rankmu_factor = 2.*(0.25+es.sp.weights.mueff + 1. / es.sp.weights.mueff - \ - # 2. + 0*es.popsize / ( 2 * (es.popsize + 5))) / \ - # (2 * (nz/es.N + 0.5) * (es.N + 2)**1. + - # es.sp.weights.mueff) / es.sp.cmu - # # this happens in PyCMA - # sp.cmu = min(1 - sp.c1, - # rankmu_factor * ccovfac * alphacov * - # (rankmu_offset + mu + 1 / mu - 2) / - # ((N + 2)** 2.0 + alphacov * mu / 2)) - - # breakpoint() - ccovfac = 1. # For testing purpose - es.opts['CMA_rankone'] = rankone_factor * ccovfac - es.opts['CMA_rankmu'] = rankmu_factor * ccovfac - es.sp = cma.evolution_strategy._CMAParameters(N = es.N, - opts = es.opts, - verbose = es.opts['verbose'] > 0) - -def run_pycma_reg(testfun=None): - dim = 5 - P1 = np.eye(dim) - B1 = special_ortho_group.rvs(int(dim/2)) - B2 = special_ortho_group.rvs(int(dim/2)) - B = np.eye(dim) - P2 = np.random.permutation(np.eye(dim)) - testfun = testfun or (lambda x: cma.ff.elli(np.linalg.multi_dot([P2, B, P1, np.array(x)]))) - for dim in [dim]: - for threshold in np.linspace(0., 1., 2): - factor = 1. - alpha = 1 - - thresholds = [threshold] - prefix = '2block_elli_dim_'+str(dim)+'_thr'+str(int(10000*threshold))+'e-4' - es = cma.CMAEvolutionStrategy(dim*[3], 1.0, inopts = {'ftarget':1e-10, - 'CMA_active':False, - 'verbose':1, - 'verb_filenameprefix':prefix, - #'AdaptSigma':False, - - }) - es.adapt_sigma.initialize(es) - - sm_D, sm_B = es.sm.D.copy(), es.sm.B.copy() - - while not es.stop(): - C_tilde = es.sm.correlation_matrix - P = np.linalg.inv(C_tilde) - P_tilde = correlation_matrix(P) - - #Regularize the sample matrix - W = alpha * np.float_(np.abs(P_tilde) < threshold) #*(1-np.abs(P_tilde))**2. - - est = QuicGraphicalLasso(lam=W, - Sigma0=C_tilde, - Theta0=P, - init_method=lambda x: (x.copy(), - np.max(np.abs(np.triu(x)))), - ).fit(C_tilde) - - diag_root_C = np.diag(es.sm.C) - - sample_matrix = np.linalg.multi_dot( - (np.diag(diag_root_C**.5), est.covariance_, - np.diag(diag_root_C**.5))) - - - sm_D, sm_B = np.linalg.eigh(sample_matrix) - - def my_transform_inverse(x): - return np.dot(sm_B, np.dot(sm_B.T, x) / sm_D**.5) - - - es.sm.transform_inverse = my_transform_inverse - nz = np.sum(np.abs(np.triu(est.precision_, 1))>0) - reset_ccov_learning_rate(es, nz+dim) # this is also differrent! 
- - #Sample and Update - arz = np.random.randn(es.sp.popsize, dim) - X = es.ask() - X = np.dot(sm_B, (sm_D**.5 * arz).T).T - Y = [es.mean+es.sigma*x for x in X] #needs additionally sigma_vec - fit = [testfun(y) for y in Y] - - es.tell(Y, fit) - es.disp(100) - -def toast(msg): - msg = (msg - .replace('\n', '`n') - .replace('\t', '') - .replace(' ', '` ') - .replace(',', '`,') - .replace('(', '`(') - .replace(')', '`)') - ) - os.system(f"powershell.exe -command New-BurntToastNotification -Text '{msg}'") - -if __name__ == "__main__": - import sys, shutil - # run_pycma_reg() - *_, msg = evaluate(1, 5, 5, label="CMA-ES") - - # for f in (1, 2): - # for tau in np.linspace(.2, .99, 10): - # print() - # *_, msg = evaluate(f, 5, 5, regularization=True, tau=tau, label=f"CMA-ES (reg, tau={tau})") - - # iterations = 5 - # for f in range(1, 25): - # *_, msg = evaluate(f, 4, iterations, regularization=False, label="CMA-ES") - # print() - # exp_name = 'test-regularization' - # if os.path.isdir(os.path.join("./data", exp_name)): - # shutil.rmtree(os.path.join("./data", exp_name)) - - # init sigma should be 1 - - # f = int(sys.argv[1]) - # for d in (5, 10, 20, 40, 80): - # *_, msg = evaluate(f, d, iterations, regularization=False, label="CMA-ES", logging=True, data_folder = "./data", exp_name=exp_name) - # print() - # for tau in np.linspace(.0, .99, 10): - # *_, msg = evaluate(f, d, iterations, regularization=True, tau=tau, label=f"CMA-ES (reg, tau={tau})", logging=True, data_folder = "./data", exp_name=exp_name) - # print() - # print("*"*80) - # print() - - - - \ No newline at end of file diff --git a/tests/__init__.py b/tests/__init__.py index e69de29..6a29122 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -0,0 +1 @@ +"""Module containing tests for ModularCMA-ES package.""" \ No newline at end of file diff --git a/tests/expected.py b/tests/expected.py new file mode 100644 index 0000000..b451ee8 --- /dev/null +++ b/tests/expected.py @@ -0,0 +1,600 @@ +BBOB_2D_PER_MODULE_20_ITER = { + "active_True": [ + 79.60870540043959, + -6.834740339081748, + -446.942231900092, + -453.1166279102467, + 12.91474629678331, + 2057.8823697805324, + 93.54091559010935, + 149.29454116123992, + 123.88066647543144, + 6731.6154970887155, + 102443.81430369585, + 643625.010286943, + 45.175034363167605, + -52.01531942754774, + 1017.3030673363197, + 71.43871188992895, + 3.146564668875037, + -8.279403543275532, + -102.10176326857986, + -543.6999174336394, + 42.700744146686965, + -999.9997057338787, + 13.04988097465894, + 117.29640024093563, + ], + "base_sampler_gaussian": [ + 79.60197400323416, + 9075.81294241299, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903702, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 13.582863728801364, + 116.76955516252966, + ], + "base_sampler_halton": [ + 79.48077842021073, + 136.53129100639717, + -446.7506458965296, + -446.87082153002194, + 11.950884905405736, + 5329.481213692499, + 93.0361464944175, + 149.71900030510446, + 124.02881103802382, + 70984.29907315908, + 1122036.1339262475, + 323500.4088727226, + 40.771980667227886, + -51.48698849157431, + 1020.9436930982043, + 78.05502682158907, + -4.961694831220257, + 157.23783058587483, + -102.30221897973205, + 
-543.7781557151272, + 42.67339847137175, + -999.9963011818812, + 14.73597350603703, + 124.02200389471975, + ], + "base_sampler_sobol": [ + 79.58776048088133, + 3422.226350474581, + -445.62619936885244, + -452.131564688653, + 7.17122029401677, + 42.96515961182474, + 93.45715847183976, + 149.54498657058198, + 126.88596518033647, + 298.42477393412827, + 1365728.4147451057, + 48635.303416026385, + 48.505871726336075, + -51.74059898989086, + 1016.9939691575535, + 72.24266434815569, + -2.605200628010161, + 169.34805329384733, + -102.5486907676662, + -543.3053885969988, + 42.781850042137705, + -999.9962323826467, + 12.046411510472002, + 107.7143392718549, + ], + "bound_correction_COTN": [ + 79.60197400323416, + 9075.812942413004, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903709, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 13.582863728801364, + 116.76955516252966, + ], + "bound_correction_mirror": [ + 79.60197400323416, + 9075.812942413004, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903709, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 13.582863728801364, + 116.76955516252966, + ], + "bound_correction_saturate": [ + 79.60197400323416, + 9075.812942413004, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903709, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 13.582863728801364, + 116.76955516252966, + ], + "bound_correction_toroidal": [ + 79.60197400323416, + 9075.812942413004, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903709, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 13.582863728801364, + 116.76955516252966, + ], + "bound_correction_unif_resample": [ + 79.60197400323416, + 9075.812942413004, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903709, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 13.582863728801364, + 116.76955516252966, + ], + "elitist_True": [ + 79.58359771151284, + 3722.3761689261983, + -444.6996743229732, + 
-449.63233155751726, + 11.403773075168843, + 1938.1349061808248, + 93.54091559010935, + 149.72424316023083, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 338749.1203340543, + 43.368771569040824, + -51.8908644736279, + 1017.567313102047, + 82.16884108988424, + 5.9547855849801365, + 1.4811701439965574, + -102.1160727430898, + -543.698445859984, + 42.60165481703265, + -999.9991133679798, + 9.843395190328877, + 111.69367145373013, + ], + "local_restart_BIPOP": [ + 79.60197400323416, + 9075.81294241299, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903702, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 13.582863728801364, + 116.76955516252966, + ], + "local_restart_IPOP": [ + 79.60197400323416, + 9075.81294241299, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903702, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 13.582863728801364, + 116.76955516252966, + ], + "mirrored_mirrored": [ + 79.49271732413378, + -3.9902436014381237, + -449.43912237059925, + -454.76923415255794, + -5.716753396809776, + 38.656680881133425, + 92.96068633887359, + 149.4276007295378, + 124.29995348854516, + 496.9430991621704, + 5774.525239039822, + 10810.490466468276, + 65.01376333683325, + -52.14160901603763, + 1016.7839906734483, + 77.37715351910123, + -0.08317538023811721, + 71.41166512690214, + -101.99373657488567, + -545.1782424507006, + 42.700744146686965, + -999.5409569330642, + 16.018941166090844, + 118.76164430789493, + ], + "mirrored_mirrored pairwise": [ + 79.49271732413378, + -3.9902436014381237, + -449.9527094090616, + -453.43140996395596, + -5.716753396809776, + 38.656680881133425, + 93.54091559010935, + 149.66006205975523, + 124.3073642920228, + 496.9430991621704, + 5774.525239039822, + 10810.490466468276, + 65.01376333683325, + -52.14160901603763, + 1012.9356025505306, + 79.37035747661811, + -0.08086837288995241, + -7.360624274953013, + -102.5420591790735, + -545.1782424507006, + 42.700744146686965, + -999.9999959402685, + 11.36066981667674, + 120.60256772763952, + ], + "orthogonal_True": [ + 80.07173193213463, + -25.0810474775418, + -448.44029337005736, + -450.2217227339582, + 17.131150531337912, + 104768.75359048153, + 92.94000013395934, + 149.41567464975682, + 125.54323736579038, + 1949380.8182024239, + 7892515.3458270915, + 1608304.8817702506, + 75.99337230697218, + -50.85961901646829, + 1013.0164228422623, + 76.63491974976154, + 15.945556157304662, + 218.11413710292456, + -102.47909541110062, + -543.8195887825041, + 42.93989939186737, + -970.8073891259909, + 13.673346703024826, + 119.86000340058254, + ], + "sequential_True": [ + 81.42670307730923, + -197.03460133734959, + -440.14992398127674, + -451.12732572736394, + -13.291945048225214, + 6076.981902969733, + 94.95355998430679, + 149.29642394250206, + 124.1297597307256, + 4971732.424278931, + 12676599.493062815, + 10404.44386051647, + 57.21633319868311, + 
-48.17074308306809, + 1014.2663105715213, + 75.88548853703115, + -1.9949361760935211, + 5.935574663612218, + -101.06186715149457, + -543.698445859984, + 42.700744146686965, + -999.9194766573045, + 16.134216067911535, + 120.60256772763944, + ], + "step_size_adaptation_csa": [ + 79.60197400323416, + 9075.81294241299, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903702, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 13.582863728801364, + 116.76955516252966, + ], + "step_size_adaptation_msr": [ + 79.55182029831956, + 20022.886653199315, + -442.3568666516263, + -458.5878553497072, + 18.230224667407526, + 3437.0948931697662, + 93.54091559010935, + 149.24566420464936, + 123.88066647543144, + 745.4050969158611, + 21842.10615574948, + 2306430.0628835405, + 48.237613840872996, + -51.65799097458237, + 1017.7040292314994, + 71.36065658286809, + 3.729454150250728, + -9.62302556384418, + -102.18595163861995, + -543.8419319880991, + 42.700744146686965, + -999.8834332320595, + 15.102687676216153, + 114.74534644305785, + ], + "step_size_adaptation_tpa": [ + 79.5538591212953, + -141.94345328377693, + -440.14992398127674, + -458.5878553497072, + 4.962627005713351, + 6828.313541466857, + 93.54091559010935, + 149.6694297943688, + 123.88066647543144, + 49.984511609200496, + 808213.8882862442, + 14527.46183219469, + 77.93028995072423, + -51.04722546877122, + 1013.9675706102718, + 77.79736868596385, + 4.187110321889211, + -5.108167534192148, + -102.10176326857986, + -543.8179322418812, + 42.65828050834767, + -999.8888868157063, + 15.102687676216153, + 117.28401550404162, + ], + "threshold_convergence_True": [ + 79.60197400323416, + 9075.81294241299, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903702, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 13.582863728801364, + 116.76955516252966, + ], + "weights_option_1/2^lambda": [ + 79.5630351924981, + 4629.283234379299, + -440.14992398127674, + -458.5322418694519, + 17.401736662231777, + 2171.3837540807126, + 93.54091559010935, + 149.3215688070224, + 123.88066647543144, + 4775.304616031011, + 2485.875547646586, + 2552406.7655545343, + 57.15312947598912, + -51.89261437835274, + 1017.7040292314994, + 73.89054993912737, + -12.20385658566134, + 9.78378595249066, + -102.15555135313454, + -543.698445859984, + 42.700744146686965, + -999.9821332012928, + 18.573719726473, + 111.84525007583429, + ], + "weights_option_default": [ + 79.60197400323416, + 9075.81294241299, + -443.0425148340509, + -449.63233155751726, + 15.221583968074889, + 1938.1349061808248, + 93.54091559010935, + 149.27592202799931, + 123.88066647543144, + 7116.932319384926, + 335648.9894252748, + 1407321.3639251885, + 43.368771569040824, + -52.116212719435566, + 1017.7040292314994, + 72.00354123874057, + 7.140121147903702, + 34.24774851152607, + -102.24646124346252, + -543.698445859984, + 42.700744146686965, + -999.8961680110624, + 
13.582863728801364, + 116.76955516252966, + ], + "weights_option_equal": [ + 79.4841795554343, + -206.93295112420896, + -446.8074233371284, + -445.1350192190321, + 21.06207513561158, + 46.678137440267065, + 93.54091559010935, + 149.56940870119797, + 123.86194759511636, + 29972.293198196898, + 361029.7991199966, + 7697201.177123937, + 89.58749452760148, + -51.4644595745646, + 1017.7040292314994, + 77.27755720351972, + -10.054973805866888, + 12.496064113477054, + -102.10176326857986, + -543.7171538434422, + 42.677106643653936, + -999.99941581072, + 14.937956237333932, + 115.99850936925196, + ], +} \ No newline at end of file diff --git a/tests/test_asktellcmaes.py b/tests/test_asktellcmaes.py index f573116..07c9685 100644 --- a/tests/test_asktellcmaes.py +++ b/tests/test_asktellcmaes.py @@ -1,47 +1,58 @@ +"""Module containing tests for Ask-tell interface of ModularCMA-ES.""" + import unittest import numpy as np from modcma import asktellcmaes from IOHexperimenter import IOH_function + class AskTellCMAESTestCase(unittest.TestCase): + """Test case for ask-tell interface of Modular CMA-ES.""" + def setUp(self): + """Test setup method.""" self.d = 5 self.fid = 1 - self.func = IOH_function(1, 5, 1, suite = "BBOB") - self.opt = asktellcmaes.AskTellCMAES(self.d, target = 79.48) + self.func = IOH_function(1, 5, 1, suite="BBOB") + self.opt = asktellcmaes.AskTellCMAES(self.d, target=79.48) def test_sequential_selection_disabled(self): + """Test whether sequential is disabled.""" self.opt.parameters.sequential = True with self.assertRaises(NotImplementedError): _ = self.opt.ask() def test_unkown_xi(self): + """Test whether errors are produced correctly.""" with self.assertRaises(RuntimeError): - self.opt.tell(np.random.random((self.d, 1)), 90.) + self.opt.tell(np.random.uniform(size=(self.d, 1)), 90.0) _ = self.opt.ask() with self.assertRaises(ValueError): - self.opt.tell(np.random.random((self.d, 1)), 90.) 
- + self.opt.tell(np.random.uniform(size=(self.d, 1)), 90.0) def test_warns_on_repeated_xi(self): + """Test whether warnings are produced correctly.""" xi = self.opt.ask() self.opt.tell(xi, self.func(xi.flatten())) with self.assertWarns(UserWarning): self.opt.tell(xi, self.func(xi.flatten())) def test_ask(self): + """Test ask mechanism.""" xi = self.opt.ask() self.assertIsInstance(xi, np.ndarray) self.assertEqual(len(xi), self.d) def test_tell(self): + """Test tell mechanism.""" xi = self.opt.ask() fi = self.func(xi.flatten()) self.opt.tell(xi, fi) self.assertEqual(self.opt.parameters.population.f[0], fi) - + def test_single_run(self): + """Test a single run of the mechanism.""" while True: try: xi = self.opt.ask() @@ -52,10 +63,12 @@ def test_single_run(self): self.assertNotEqual(len(self.opt.ask_queue), 0) def test_disabled_functions(self): + """Test whether errors are produced correctly.""" with self.assertRaises(NotImplementedError): self.opt.run() with self.assertRaises(NotImplementedError): self.opt.step() -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() \ No newline at end of file diff --git a/tests/test_fmin.py b/tests/test_fmin.py index 600d717..7121683 100644 --- a/tests/test_fmin.py +++ b/tests/test_fmin.py @@ -1,16 +1,22 @@ +"""Module containing tests for fmin function.""" + import unittest from modcma import modularcmaes + class TestFmin(unittest.TestCase): + """Test case for fmin function of Modular CMA-ES.""" + def test_best_so_far_storage(self): + """Test storage of best so far individual.""" c = modularcmaes.ModularCMAES(sum, 5) c.step() self.assertEqual(len(c.parameters.xopt), 5) self.assertEqual(sum(c.parameters.xopt), c.parameters.fopt) - + def test_fmin(self): - xopt, fopt, evaluations = modularcmaes.fmin(sum, 5, target = 0.) 
+        """Test a single run of fmin."""
+        xopt, fopt, evaluations = modularcmaes.fmin(sum, 5, target=0.0)
         self.assertEqual(sum(xopt), fopt)
         self.assertGreater(evaluations, 0)
         self.assertEqual(len(xopt), 5)
-
diff --git a/tests/test_modularcmaes.py b/tests/test_modularcmaes.py
index c0f0fef..5fd8a51 100644
--- a/tests/test_modularcmaes.py
+++ b/tests/test_modularcmaes.py
@@ -1,3 +1,5 @@
+"""Module containing tests for ModularCMA-ES."""
+
 import os
 import shutil
 import io
@@ -5,73 +7,107 @@ import unittest.mock
 import numpy as np
+
 from modcma import parameters, utils, modularcmaes
+from IOHexperimenter import IOH_function
+from .expected import BBOB_2D_PER_MODULE_20_ITER
-class TestModularCMAESMeta(type):
-    def __new__(classes, name, bases, clsdict):
-        def gen_test(module, value):
-            def do_test(self):
-                return self.run_module(module, value)
-            return do_test
-        for module in parameters.Parameters.__modules__:
+class TestModularCMAESMeta(type):
+    """Metaclass for generating test-cases."""
+
+    def __new__(classes, name, bases, clsdict):
+        """Method for generating new classes."""
+
+        def generate_tests(module, value):
+            return dict(
+                {
+                    f"test_{module}_{value}": lambda self: self.run_module(
+                        module, value
+                    )
+                },
+                **{
+                    f"test_{module}_{value}_f{fid}": lambda self, fid=fid: self.run_bbob_function(
+                        module, value, fid
+                    )
+                    for fid in range(1, 25)
+                },
+            )
+
+        for module in parameters.Parameters.__modules__:
             m = getattr(parameters.Parameters, module)
             if type(m) == utils.AnyOf:
                 for o in filter(None, m.options):
-                    clsdict[f"test_{module}_{o}"] = gen_test(module, o)
+                    clsdict.update(generate_tests(module, o))
+
             elif type(m) == utils.InstanceOf:
-                clsdict[f"test_{module}_True"] = gen_test(module, True)
-
-        clsdict[f"test_standard"] = gen_test('active', True)
+                clsdict.update(generate_tests(module, True))
+
         return super().__new__(classes, name, bases, clsdict)
-class TestModularCMAES(
-    unittest.TestCase,
-    metaclass=TestModularCMAESMeta):
-    _dim = 2
-    _budget = int(1e2 * _dim)
+class TestModularCMAES(unittest.TestCase, metaclass=TestModularCMAESMeta):
+    """Test case for ModularCMAES Object.
Gets applied for all Parameters.__modules__.""" + + _dim = 2 + _budget = int(1e1 * _dim) def run_module(self, module, value): + """Test a single run of the mechanism with a given module active.""" self.p = parameters.Parameters( - self._dim, budget = self._budget, - **{module:value} - ) - self.c = modularcmaes.ModularCMAES( - sum, parameters=self.p).run() + self._dim, budget=self._budget, **{module: value} + ) + self.c = modularcmaes.ModularCMAES(sum, parameters=self.p).run() - def test_select_raises(self): - c = modularcmaes.ModularCMAES(sum, 5, - mirrored='mirrored pairwise' + def run_bbob_function(self, module, value, fid): + """Expects the output to be consistent with BBOB_2D_PER_MODULE_20_ITER.""" + np.random.seed(42) + f = IOH_function(fid, self._dim, 1) + self.p = parameters.Parameters( + self._dim, budget=self._budget, **{module: value} + ) + self.c = modularcmaes.ModularCMAES(f, parameters=self.p).run() + + self.assertAlmostEqual( + self.c.parameters.fopt, + BBOB_2D_PER_MODULE_20_ITER[f"{module}_{value}"][fid - 1], ) + + def test_select_raises(self): + """Test whether errors are produced correctly.""" + c = modularcmaes.ModularCMAES(sum, 5, mirrored="mirrored pairwise") c.mutate() c.parameters.population = c.parameters.population[:3] with self.assertRaises(ValueError): c.select() def test_local_restart(self): + """Test a single iteration of the mechanism with a given local restart active.""" for lr in filter(None, parameters.Parameters.local_restart.options): - c = modularcmaes.ModularCMAES( - sum, 5, local_restart=lr) + c = modularcmaes.ModularCMAES(sum, 5, local_restart=lr) for _ in range(10): c.step() - + c.parameters.max_iter = 5 c.step() - - + + class TestModularCMAESSingle(unittest.TestCase): + """Test case for ModularCMAES Object, holds custom tests.""" + def test_str_repr(self): + """Test the output of repr and str.""" c = modularcmaes.ModularCMAES(sum, 5) self.assertIsInstance(str(c), str) self.assertIsInstance(repr(c), str) def test_n_generations(self): - c = modularcmaes.ModularCMAES(sum, 5, n_generations = 5) + """Test n iterations of the mechanism.""" + c = modularcmaes.ModularCMAES(sum, 5, n_generations=5) self.assertEqual(1, len(c.break_conditions)) - for i in range(5): + for _ in range(5): c.step() self.assertTrue(any(c.break_conditions)) @@ -79,36 +115,37 @@ def test_n_generations(self): c = modularcmaes.ModularCMAES(sum, 5) self.assertEqual(2, len(c.break_conditions)) - def testtpa_mutation(self): + """Test tpa mutation.""" + class TpaParameters: - sigma = .4 + sigma = 0.4 rank_tpa = None - a_tpa = .3 + a_tpa = 0.3 b_tpa = 0 + def __init__(self, m_factor=1.1): - self.m = np.ones(5) * .5 + self.m = np.random.rand(5, 1) self.m_old = self.m * m_factor - + p = TpaParameters() - x, y, f = [], [], [] - modularcmaes.tpa_mutation(sum, p, x, y, f) - for _, l in enumerate([x,y,f]): - self.assertEqual(len(l), 2) - - self.assertListEqual((-y[0]).tolist(), y[1].tolist()) - - for xi, fi in zip(x, f): + y, x, f = modularcmaes.tpa_mutation(sum, p) + for _, l in enumerate([x, y, f]): + self.assertIn(2, l.shape) + + self.assertListEqual((-y[:, 0]).tolist(), y[:, 1].tolist()) + + for xi, fi in zip(x.T, f): self.assertEqual(sum(xi), fi) - - self.assertEqual(p.rank_tpa, p.a_tpa + p.b_tpa) + + self.assertEqual(p.rank_tpa, p.a_tpa + p.b_tpa) p = TpaParameters(-2) - x, y, f = [], [], [] - modularcmaes.tpa_mutation(sum, p, x, y, f) + y, x, f = modularcmaes.tpa_mutation(sum, p) self.assertEqual(p.rank_tpa, -p.a_tpa) def test_scale_with_treshold(self): + """Test threshold mutations.""" 
         threshold = 5
         z = np.ones(20)
         new_z = modularcmaes.scale_with_threshold(z.copy(), threshold)
@@ -118,38 +155,40 @@ def test_scale_with_treshold(self):
         self.assertGreater(new_z_norm, threshold)
     def testcorrect_bounds(self):
+        """Test bound correction."""
         x = np.ones(5) * np.array([2, 4, 6, -7, 3])
         ub, lb = np.ones(5) * 5, np.ones(5) * -5
-        disabled, *correction_methods = parameters.Parameters.__annotations__\
-            .get("bound_correction")
+        disabled, *correction_methods = parameters.Parameters.__annotations__.get(
+            "bound_correction"
+        )
         new_x, corrected = modularcmaes.correct_bounds(x.copy(), ub, lb, disabled)
         self.assertEqual((x == new_x).all(), True)
         self.assertEqual(corrected, True)
-
+
         for correction_method in correction_methods:
-            new_x, corrected = modularcmaes.\
-                correct_bounds(x.copy(), ub, lb, correction_method)
+            new_x, corrected = modularcmaes.correct_bounds(
+                x.copy(), ub, lb, correction_method
+            )
             self.assertEqual(corrected, True)
             self.assertNotEqual((x == new_x).all(), True)
-            self.assertGreaterEqual( np.min(new_x), -5)
+            self.assertGreaterEqual(np.min(new_x), -5)
             self.assertLessEqual(np.max(new_x), 5)
             self.assertEqual((x[[0, 1, 4]] == new_x[[0, 1, 4]]).all(), True)
         with self.assertRaises(ValueError):
             modularcmaes.correct_bounds(x.copy(), ub, lb, "something_undefined")
-
-    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
+
+    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
     def test_evaluate_bbob(self, mock_std):
-        data_folder = os.path.join(os.path.dirname(__file__), 'tmp')
+        """Test the mechanism of evaluate_bbob."""
+        data_folder = os.path.join(os.path.dirname(__file__), "tmp")
         if not os.path.isdir(data_folder):
             os.mkdir(data_folder)
         modularcmaes.evaluate_bbob(1, 1, 1, logging=True, data_folder=data_folder)
-        shutil.rmtree(data_folder)
+        shutil.rmtree(data_folder)
         modularcmaes.evaluate_bbob(1, 1, 1)
-
-
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/test_parameters.py b/tests/test_parameters.py
index 8a76316..38fbf59 100644
--- a/tests/test_parameters.py
+++ b/tests/test_parameters.py
@@ -1,9 +1,10 @@
+"""Module containing tests for ModularCMA-ES Parameters."""
+
 import os
 import unittest
 import warnings
 import pickle
-
 import numpy as np
 from modcma.parameters import Parameters
@@ -13,86 +14,92 @@ class TestParameters(unittest.TestCase):
+    """Test case for Parameters object of Modular CMA-ES."""
+
     def setUp(self):
+        """Test setup method."""
         np.random.seed(42)
         self.p = Parameters(5)
-
+
     def try_wrong_types(self, p, name, type_):
-        for x in (1, 1., 'x', True, np.ndarray,):
+        """Test for wrong input types."""
+        for x in (1, 1.0, "x", True, np.ndarray,):
             if type(x) != type_:
                 with self.assertRaises(TypeError, msg=f"{name} {type_} {x}"):
                     setattr(p, name, x)
-
     def test_updating(self):
-        self.p.update(dict(mirrored='mirrored'))
-        self.assertEqual(self.p.mirrored, 'mirrored')
-        self.assertEqual(self.p.sampler.__name__, 'mirrored_sampling')
+        """Test the updating of parameters."""
+        self.p.update(dict(mirrored="mirrored"))
+        self.assertEqual(self.p.mirrored, "mirrored")
+        self.assertEqual(self.p.sampler.__name__, "mirrored_sampling")
         with self.assertRaises(ValueError):
             self.p.update(dict(nonexist=10))
-
+
         self.p.update(dict(active=True))
         self.assertEqual(self.p.active, True)
-        self.assertEqual(self.p.mirrored, 'mirrored')
+        self.assertEqual(self.p.mirrored, "mirrored")
+        self.assertEqual(self.p.sampler.__name__, "mirrored_sampling")
         old_mueff = self.p.mueff
-        self.p.update(dict(weights_option="1/mu"), reset_default_modules=True)
+        self.p.update(dict(weights_option="equal"), reset_default_modules=True)
         self.assertEqual(self.p.active, False)
         self.assertEqual(self.p.mirrored, None)
-        self.assertEqual(self.p.weights_option, "1/mu")
+        self.assertEqual(self.p.weights_option, "equal")
         self.assertNotEqual(self.p.mueff, old_mueff)
-
-
     def test_bipop_parameters(self):
-        self.p.local_restart = 'BIPOP'
-        self.p.used_budget += 11
+        """Test BIPOPParameters."""
+        self.p.local_restart = "BIPOP"
+        self.p.used_budget += 11
         self.p.bipop_parameters.adapt(self.p.used_budget)
         self.assertEqual(self.p.bipop_parameters.large, True)
-        bp = self.p.bipop_parameters
-        self.assertEqual(bp.lambda_, self.p.lambda_*2)
-        self.assertEqual(bp.mu, self.p.mu*2)
+        bp = self.p.bipop_parameters
+        self.assertEqual(bp.lambda_, self.p.lambda_ * 2)
+        self.assertEqual(bp.mu, self.p.mu * 2)
         self.assertEqual(bp.sigma, 2)
         self.p.used_budget += 11
-        bp.adapt(self.p.used_budget)
+        bp.adapt(self.p.used_budget)
         self.assertEqual(self.p.bipop_parameters.large, False)
         self.assertLessEqual(bp.lambda_, self.p.lambda_)
         self.assertLessEqual(bp.mu, self.p.mu)
         self.assertLessEqual(bp.sigma, self.p.init_sigma)
         self.p.used_budget += 11
-        bp.adapt(self.p.used_budget)
+        bp.adapt(self.p.used_budget)
         self.assertEqual(bp.used_budget, 33)
-
     def test_sampler(self):
+        """Test different samplers."""
         for orth in (False, True):
             self.p.mirrored = None
             self.p.orthogonal = orth
             sampler = self.p.get_sampler()
-            self.assertEqual(sampler.__name__ == 'orthogonal_sampling', orth)
-        self.p.mirrored = 'mirrored'
-        sampler = self.p.get_sampler()
-        self.assertEqual(sampler.__name__, 'mirrored_sampling')
-        self.p.mirrored = 'mirrored pairwise'
-        self.assertEqual(sampler.__name__, 'mirrored_sampling')
+            self.assertEqual(sampler.__name__ == "orthogonal_sampling", orth)
+        self.p.mirrored = "mirrored"
+        sampler = self.p.get_sampler()
+        self.assertEqual(sampler.__name__, "mirrored_sampling")
+        self.p.mirrored = "mirrored pairwise"
+        self.assertEqual(sampler.__name__, "mirrored_sampling")
     def test_wrong_parameters(self):
+        """Test whether warnings are produced correctly."""
         with self.assertWarns(RuntimeWarning):
             Parameters(1, mu=3, lambda_=2)
     def test_options(self):
+        """Test setting of options."""
         for module in Parameters.__modules__:
             m = getattr(Parameters, module)
             if type(m) == AnyOf:
                 for o in m.options:
                     setattr(self.p, module, o)
-                    Parameters(1, **{module:o})
+                    Parameters(1, **{module: o})
-    def step(self):
-        y = np.random.rand(self.p.lambda_, self.p.d).T
-        x = self.p.m.reshape(-1,1) * y
+    def step(self):
+        """Perform a single iteration of the mechanism."""
+        y = np.random.rand(self.p.lambda_, self.p.d).T
+        x = self.p.m.reshape(-1, 1) * y
         f = np.array(list(map(sum, x)))
         self.p.used_budget += self.p.lambda_
         self.p.population = Population(x, y, f)
@@ -100,8 +107,9 @@ def step(self):
         self.p.m *= np.linalg.norm(y, axis=1).reshape(-1, 1)
         self.p.adapt()
         self.p.old_population = self.p.population.copy()
-
+
     def set_parameter_and_step(self, pname, value, nstep=2, warning_action="default"):
+        """Perform a number of iterations of the mechanism
after setting a parameter."""
         setattr(self.p, pname, value)
         with warnings.catch_warnings():
             warnings.simplefilter(warning_action)
@@ -109,29 +117,35 @@ def set_parameter_and_step(self, pname, value, nstep=2, warning_action="default"
             self.step()
     def test_tpa(self):
-        self.p.rank_tpa = .3
-        self.set_parameter_and_step('step_size_adaptation', 'tpa')
+        """Test TPA."""
+        self.p.rank_tpa = 0.3
+        self.set_parameter_and_step("step_size_adaptation", "tpa")
     def test_msr(self):
-        self.set_parameter_and_step('step_size_adaptation', 'msr')
-
+        """Test MSR."""
+        self.set_parameter_and_step("step_size_adaptation", "msr")
+
     def test_active(self):
-        self.set_parameter_and_step('active', True)
-
+        """Test active."""
+        self.set_parameter_and_step("active", True)
+
     def test_reset(self):
+        """Test if C is correctly reset if it has inf."""
         self.p.C[0][0] = np.inf
         self.step()
     def test_warning(self):
+        """Test whether warnings are produced correctly."""
         self.p.compute_termination_criteria = True
-        self.set_parameter_and_step('max_iter', True, 5, 'ignore')
-
+        self.set_parameter_and_step("max_iter", True, 5, "ignore")
+
     def test_threshold(self):
+        """Test threshold mutation."""
         self.step()
         self.assertEqual(type(self.p.threshold), np.float64)
-
     def test_from_arary(self):
+        """Test instantiation from a config array."""
         c_array = [0] * 11
         with self.assertRaises(AttributeError):
@@ -146,26 +160,24 @@ def test_from_arary(self):
             _c_array = c_array.copy()
             _c_array[0] = 2
             p = Parameters.from_config_array(5, _c_array)
-
+
         p = Parameters.from_config_array(5, c_array)
-
     def test_save_load(self):
-        tmpfile = os.path.join(os.path.dirname(__file__), 'tmp.pkl')
+        """Test pickle save and load mechanism."""
+        tmpfile = os.path.join(os.path.dirname(__file__), "tmp.pkl")
         self.p.save(tmpfile)
         p = Parameters.load(tmpfile)
         os.remove(tmpfile)
         with self.assertRaises(OSError):
             self.p.load("__________")
-
-
+
         with open(tmpfile, "wb") as f:
             pickle.dump({}, f)
         with self.assertRaises(AttributeError):
             self.p.load(tmpfile)
         os.remove(tmpfile)
-
-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
diff --git a/tests/test_population.py b/tests/test_population.py
index f40b552..ce95df5 100644
--- a/tests/test_population.py
+++ b/tests/test_population.py
@@ -1,105 +1,113 @@
-import types
+"""Module containing tests for ModularCMA-ES Population."""
+
 import unittest
 import numpy as np
-from modcma import population, utils
+from modcma import population
 class TestPopulation(unittest.TestCase):
+
+    """Test case for Population object of Modular CMA-ES."""
+
     _dim = 5
     _lambda = 16
-    _sigma = .5
+    _sigma = 0.5
     def setUp(self):
+        """Test setup method."""
         np.random.seed(12)
         self.set_params()
-
     def set_params(self):
+        """Set default parameters."""
         self.xmean = np.random.rand(self._dim, 1)
         self.B = np.eye(self._dim)
         self.D = np.ones((self._dim, 1))
         self.z = np.random.multivariate_normal(
-            mean=np.zeros(self._dim),
-            cov=np.eye(self._dim),
-            size=self._lambda
+            mean=np.zeros(self._dim), cov=np.eye(self._dim), size=self._lambda
         ).T
         self.y = np.dot(self.B, self.D * self.z)
         self.x = self.xmean + (self._sigma * self.y)
         self.f = np.array([sum(i) for i in self.x.T])
         self.pop = population.Population(self.x, self.y, self.f)
-
     def correct_copy(self, instance, other):
-        self.assertNotEqual(
-            id(instance.x), id(other.x)
-        )
-        self.assertNotEqual(
-            id(instance.y), id(other.y)
-        )
-        self.assertNotEqual(
-            id(instance.f), id(other.f)
-        )
+        """Test copy behaviour."""
+        self.assertNotEqual(id(instance.x), id(other.x))
+        self.assertNotEqual(id(instance.y),
id(other.y)) + self.assertNotEqual(id(instance.f), id(other.f)) def test_creation(self): + """Test constructor behaviour.""" self.assertIsInstance(self.pop, population.Population) - self.correct_copy(self.pop, self) + self.correct_copy(self.pop, self.pop.copy()) def test_sort(self): + """Test sorting behaviour.""" self.pop.sort() rank = np.argsort(self.f) - for e in ("x", "y",): + for e in ("x", "y", ): self.assertListEqual( - getattr(self, e)[:, rank].tolist(), - getattr(self.pop, e).tolist() - ) - self.assertListEqual( - self.f[rank].tolist(), - self.pop.f.tolist() + getattr(self, e)[:, rank].tolist(), getattr(self.pop, e).tolist() ) + self.assertListEqual(self.f[rank].tolist(), self.pop.f.tolist()) def test_copy(self): + """Test copy behaviour.""" self.correct_copy(self.pop, self.pop.copy()) def test_getitem(self): + """Test whether errors are produced correctly.""" with self.assertRaises(KeyError): - self.pop['a'] + self.pop["a"] with self.assertRaises(KeyError): - self.pop[.1] + self.pop[0.1] self.assertIsInstance(self.pop[0], population.Population) self.assertIsInstance(self.pop[0:1], population.Population) self.assertIsInstance(self.pop[:-1], population.Population) - self.assertIsInstance(self.pop[[1,2,3]], population.Population) - - + self.assertIsInstance(self.pop[[1, 2, 3]], population.Population) + def test_1d(self): + """Test in 1d dimension.""" self._dim = 1 self.set_params() - population.Population( - self.x.ravel(), - self.y.ravel(), - self.f - ) + population.Population(self.x.ravel(), self.y.ravel(), self.f) def test_add(self): + """Test addition.""" self.pop += population.Population(self.x, self.y, self.f) - self.assertEqual(self.pop.x.shape, (self._dim, self._lambda*2,)) - self.assertEqual(self.pop.y.shape, (self._dim, self._lambda*2,)) - self.assertEqual(self.pop.f.shape, (self._lambda*2,)) + self.assertEqual( + self.pop.x.shape, + ( + self._dim, + self._lambda * 2, + ), + ) + self.assertEqual( + self.pop.y.shape, + ( + self._dim, + self._lambda * 2, + ), + ) + self.assertEqual(self.pop.f.shape, (self._lambda * 2,)) with self.assertRaises(TypeError): self.pop += 1 def test_n(self): + """Test n.""" self.assertEqual(self._lambda, self.pop.n) - - + def test_d(self): + """Test d.""" self.assertEqual(self._dim, self.pop.d) def test_repr(self): + """Test representation.""" self.assertEqual(type(repr(self.pop)), str) -if __name__ == '__main__': - unittest.main() +if __name__ == "__main__": + unittest.main() \ No newline at end of file diff --git a/tests/test_sampling.py b/tests/test_sampling.py index 8778ee1..e14916a 100644 --- a/tests/test_sampling.py +++ b/tests/test_sampling.py @@ -1,3 +1,5 @@ +"""Module containing tests for ModularCMA-ES samplers.""" + import types import unittest import numpy as np @@ -5,48 +7,62 @@ class TestSampling(unittest.TestCase): + """Test case for Modular CMA-ES samplers.""" + _dim = 5 def setUp(self): + """Test setup method.""" np.random.seed(12) def is_sampler(self, sampler): + """Test if a sampler is a sampler.""" self.assertIsInstance(sampler, types.GeneratorType) for x in range(10): sample = next(sampler) self.assertIsInstance(sample, np.ndarray) - self.assertEqual(sample.shape, (self._dim, 1, )) + self.assertEqual( + sample.shape, + ( + self._dim, + 1, + ), + ) def test_gaussian(self): + """Test gaussian sampling.""" sampler = sampling.gaussian_sampling(self._dim) self.is_sampler(sampler) def test_sobol(self): + """Test sobol sampling.""" sampler = sampling.sobol_sampling(self._dim) self.is_sampler(sampler) def test_halton(self): + """Test 
halton sampling."""
         sampler = sampling.halton_sampling(self._dim)
         self.is_sampler(sampler)
     def test_orthogonal(self):
+        """Test orthogonal sampling."""
         for base_sampler in (
-            sampling.gaussian_sampling(self._dim),
-            sampling.sobol_sampling(self._dim),
-            sampling.halton_sampling(self._dim)):
+            sampling.gaussian_sampling(self._dim),
+            sampling.sobol_sampling(self._dim),
+            sampling.halton_sampling(self._dim),
+        ):
             for n_samples in (3, 6):
-                sampler = sampling.orthogonal_sampling(
-                    base_sampler, n_samples)
+                sampler = sampling.orthogonal_sampling(base_sampler, n_samples)
                 self.is_sampler(sampler)
     def test_mirrored(self):
+        """Test mirrored sampling."""
         for base_sampler in (
-            sampling.gaussian_sampling(self._dim),
-            sampling.sobol_sampling(self._dim),
-            sampling.halton_sampling(self._dim)):
-            sampler = sampling.mirrored_sampling(
-                base_sampler
-            )
+            sampling.gaussian_sampling(self._dim),
+            sampling.sobol_sampling(self._dim),
+            sampling.halton_sampling(self._dim),
+        ):
+            sampler = sampling.mirrored_sampling(base_sampler)
             self.is_sampler(sampler)
             first_sample = next(sampler)
             second_sample = next(sampler)
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 828568e..64a7191 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,4 +1,5 @@
-import os
+"""Module containing tests for ModularCMA-ES Utilities."""
+
 import io
 import unittest
 import unittest.mock
@@ -9,67 +10,74 @@ class TestUtils(unittest.TestCase):
-
+    """Test case for utilities of Modular CMA-ES package."""
+
     def setUp(self):
+        """Test setup method."""
         class Foo(utils.AnnotatedStruct):
             x: int
-            y: float = 0.
+            y: float = 0.0
             z: np.ndarray = np.ones(5)
             c: (None, "x", "y", 1) = None
-        self.fooclass = Foo
-
+        self.fooclass = Foo
-    @unittest.mock.patch('sys.stdout', new_callable=io.StringIO)
+    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
     def test_timeit(self, mock_stdout):
+        """Test timeit method."""
         @utils.timeit
         def f():
             pass
+
         f()
         self.assertIn("Time elapsed", mock_stdout.getvalue())
     def test_anyof(self):
+        """Test AnyOf descriptor."""
         foo = self.fooclass(1)
         self.assertEqual(foo.c, None)
         with self.assertRaises(ValueError):
-            foo.c = 'z'
+            foo.c = "z"
         foo.c = 10
-        foo.c = 1.
-        foo.c = 'x'
-        self.assertEqual(foo.c, 'x')
+        foo.c = 1.0
+        foo.c = "x"
+        self.assertEqual(foo.c, "x")
     def test_instanceof(self):
+        """Test InstanceOf descriptor."""
         foo = self.fooclass(1)
         self.assertEqual(int, type(foo.x))
         self.assertEqual(float, type(foo.y))
         self.assertEqual(np.ndarray, type(foo.z))
         x = np.zeros(1)
-        foo.z = x
+        foo.z = x
         self.assertListEqual(foo.z.tolist(), x.tolist())
         self.assertNotEqual(id(foo.z), id(x))
-
+
         with self.assertRaises(TypeError):
-            bar = self.fooclass(None)
-            bar = self.fooclass('')
-            bar = self.fooclass('x')
-            bar = self.fooclass(1.)
+ _ = self.fooclass(None) + _ = self.fooclass("") + _ = self.fooclass("x") + _ = self.fooclass(1.0) foo.y = 1 - foo.y = 'z' + foo.y = "z" foo.z = 1 - foo.z = 'z' - + foo.z = "z" def test_metaclass_raises(self): + """Test metaclass raises correct error.""" with self.assertRaises(TypeError): class Foo(utils.AnnotatedStruct): - x: 'x' - + x: "x" + def test_repr(self): + """Test representation.""" self.assertEqual(type(repr(self.fooclass(1))), str) def test_descriptor(self): + """Test descriptor.""" class Foo: x = utils.Descriptor() @@ -78,24 +86,23 @@ class Foo: foo.x = 1 self.assertEqual(foo.x, 1) del foo.x - self.assertNotIn('x', foo.__dict__) + self.assertNotIn("x", foo.__dict__) def test_ert(self): + """Test ert method.""" evals = [5000, 45000, 1000, 100, 10] budget = 10000 ert, ert_sd, n_succ = utils.ert(evals, budget) self.assertEqual(n_succ, 4) - self.assertAlmostEqual( ert, 12777.5) + self.assertAlmostEqual(ert, 12777.5) self.assertAlmostEqual(ert_sd, 17484.642861665) - for evals in ([50000], [], [int(1e10)]): + for evals in ([50000], [], [int(1e10)]): ert, ert_sd, n_succ = utils.ert(evals, budget) self.assertEqual(ert, float("inf")) self.assertEqual(np.isnan(ert_sd), True) self.assertEqual(n_succ, 0) - - if __name__ == "__main__": unittest.main()
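
Note: the values asserted in test_ert above can be reproduced by hand. The sketch below is a minimal reference implementation of the assumed utils.ert semantics: the sum of all per-run evaluation counts divided by the number of runs that reached the target within the budget, with the spread reported as the standard deviation of the raw counts. The helper name ert_sketch and the exact success criterion (evaluations strictly below the budget) are assumptions inferred from the asserted numbers, not taken from the library source.

import numpy as np


def ert_sketch(evals, budget):
    """Expected running time estimate from per-run evaluation counts (assumed semantics)."""
    evals = np.asarray(evals, dtype=float)
    # A run counts as successful when it hit the target within the budget;
    # whether equality counts as success is unknown, '<' is assumed here.
    n_succ = int((evals < budget).sum())
    if n_succ == 0:
        return float("inf"), float("nan"), 0
    # Total evaluations spent over all runs divided by the number of successes,
    # plus the population standard deviation of the raw evaluation counts.
    return evals.sum() / n_succ, float(evals.std()), n_succ


# Reproduces the numbers asserted in test_ert:
#   ert_sketch([5000, 45000, 1000, 100, 10], 10000) -> (12777.5, ~17484.642861665, 4)
#   ert_sketch([50000], 10000) and ert_sketch([], 10000) -> (inf, nan, 0)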