Merge pull request #10 from IOHprofiler/parameter-updating
Parameter updating
jacobdenobel authored Nov 26, 2020
2 parents 5422feb + 6ebd3a7 commit bbfa152
Showing 4 changed files with 115 additions and 42 deletions.
2 changes: 2 additions & 0 deletions modcma/modularcmaes.py
@@ -202,6 +202,8 @@ def break_conditions(self) -> List[bool]:
-------
[bool, bool]
'''
if self.parameters.n_generations:
return [self.parameters.t >= self.parameters.n_generations]
return [
self.parameters.target >= self.parameters.fopt,
self.parameters.used_budget >= self.parameters.budget
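A minimal usage sketch of the new n_generations stopping mode (illustrative only, not part of the diff; it mirrors the test added in tests/test_modularcmaes.py below):

from modcma import modularcmaes

# With n_generations set, it becomes the single break condition;
# the target- and budget-based checks are bypassed.
c = modularcmaes.ModularCMAES(sum, 5, n_generations=5)
for _ in range(5):
    c.step()
assert any(c.break_conditions)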
110 changes: 72 additions & 38 deletions modcma/parameters.py
@@ -87,20 +87,30 @@ class Parameters(AnnotatedStruct):
The dimensionality of the problem
target: float = -float("inf")
The absolute target of the optimization problem
budget: int = None
The maximum number of iterations
n_generations: int = None
The number of generations to run the optimizer. If this value is specified,
it overrides the default break conditions: the optimizer only stops after
n_generations, and the target and budget checks are ignored.
lambda_: int = None
The number of offspring in the population
mu: int = None
The number of parents in the population
init_sigma: float = .5
The initial value of sigma (step size)
a_tpa: float = .5
Parameter used in TPA
b_tpa: float = 0.
Parameter used in TPA
cs: float = None
Learning rate for the cumulation of the step size control
cc: float = None
Learning rate for the rank-one update
cmu: float = None
Learning rate for the rank-mu update
c1: float = None
Learning rate for the rank-one update
seq_cutoff_factor: int = 1
Used in sequential selection, the number of times mu individuals must be seen
before a sequential break can be performed
@@ -112,21 +122,15 @@ class Parameters(AnnotatedStruct):
The initial length threshold used in threshold convergence
decay_factor: float = 0.995
The decay for the threshold used in threshold convergence
max_resamples: int
The maximum number of resamples that can be performed when 'dismiss' boundary correction is used
active: bool = False
Specifying whether to use active update.
[1] G. Jastrebski, D. V. Arnold, et al. Improving evolution strategies through
active covariance matrix adaptation. In Evolutionary Computation (CEC),
2006 IEEE Congress on, pages 2814–2821. IEEE, 2006
elitist: bool = False
Specifying whether to use an elitist approach
sequential: bool = False
Specifying whether to use sequential selection
[3] D. Brockhoff, A. Auger, N. Hansen, D. V. Arnold, and T. Hohm.
@@ -148,14 +152,27 @@ class Parameters(AnnotatedStruct):
[5] H. Wang, M. Emmerich, and T. Bäck. Mirrored Orthogonal Sampling
with Pairwise Selection in Evolution Strategies. In Proceedings of the
29th Annual ACM Symposium on Applied Computing, pages 154–156.
ACM, 2014.
local_restart: str = (None, 'IPOP', )
Specifying which local restart strategy should be used
IPOP:
[11] Anne Auger and Nikolaus Hansen. A restart CMA evolution strategy
with increasing population size. In IEEE Congress on Evolutionary
Computation, volume 2, pages 1769–1776, 2005.
base_sampler: str = ('gaussian', 'sobol', 'halton',)
Denoting which base sampler to use, 'sobol', 'halton' can
be selected to sample from a quasi-random sequence.
[6] A. Auger, M. Jebalia, and O. Teytaud. Algorithms (x, sigma, eta):
quasi-random mutations for evolution strategies. In Artificial Evolution:
7th International Conference, Revised Selected Papers, pages 296–307.
Springer, 2006.
mirrored: str = (None, 'mirrored', 'mirrored pairwise', )
Specifying whether to use mirrored sampling
[2] D. Brockhoff, A. Auger, N. Hansen, D. V. Arnold, and T. Hohm.
Mirrored Sampling and Sequential Selection for Evolution Strategies.
In R. Schaefer, C. Cotta, J. Kołodziej, and G. Rudolph, editors, Parallel
Problem Solving from Nature, PPSN XI: 11th International Conference,
Kraków, Poland, September 11-15, 2010, Proceedings, Part I, pages
11–21, Berlin, Heidelberg, 2010. Springer Berlin Heidelberg.
weights_option: str = ('default', '1/mu', '1/2^mu', )
Denoting the recombination weights to be used.
[7] Sander van Rijn, Hao Wang, Matthijs van Leeuwen, and Thomas Bäck. 2016.
@@ -171,11 +188,6 @@
A Median Success Rule for Non-Elitist Evolution Strategies: Study of Feasibility.
In Christian Blum, editor, Genetic and Evolutionary Computation Conference,
pages 415–422, Amsterdam, Netherlands, July 2013. ACM Press.
population: TypeVar('Population') = None
The current population of individuals
old_population: TypeVar('Population') = None
@@ -199,10 +211,6 @@
The number of function evaluations used
fopt: float
The fitness of the current best individual
t: int
The number of generations
sigma_over_time: list
@@ -263,14 +271,6 @@ class Parameters(AnnotatedStruct):
The negative recombination weights, used in active update
mueff: float
The variance effective selection mass
damps: float
Used for adapting sigma with csa
chiN: np.ndarray
@@ -290,6 +290,7 @@
d: int
target: float = -float("inf")
budget: int = None
n_generations: int = None
lambda_: int = None
mu: int = None
init_sigma: float = .5
@@ -376,7 +377,6 @@ def get_sampler(self) -> Generator:
sampler = mirrored_sampling(sampler)

return sampler


def init_fixed_parameters(self) -> None:
'''Initialization function for parameters that
@@ -400,8 +400,7 @@ def init_fixed_parameters(self) -> None:
self.budget,
self.mu / self.lambda_
)



def init_selection_parameters(self) -> None:
'''Initialization function for parameters that are of influence
in selection/population control.
@@ -432,7 +431,8 @@ def init_local_restart_parameters(self) -> None:
'''Initialization function for parameters that are used by
local restart strategies, i.e. IPOP.
'''
if len(self.restarts) == 0:
self.restarts.append(self.t)
self.max_iter = 100 + 50 * (self.d + 3)**2 / np.sqrt(self.lambda_)
self.nbin = 10 + int(np.ceil(30 * self.d / self.lambda_))
self.n_stagnation = min(int(120 + (30 * self.d / self.lambda_)), 20000)
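As a quick numeric illustration of the restart thresholds computed above (d = 5 and lambda_ = 12 are assumed example values, not taken from the diff):

import numpy as np

d, lambda_ = 5, 12
max_iter = 100 + 50 * (d + 3)**2 / np.sqrt(lambda_)       # ~1023.7
nbin = 10 + int(np.ceil(30 * d / lambda_))                # 23
n_stagnation = min(int(120 + (30 * d / lambda_)), 20000)  # 132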
@@ -448,13 +448,13 @@ def init_adaptation_parameters(self) -> None:
if self.weights_option == '1/mu':
ws = np.ones(self.mu) / self.mu
self.weights = np.append(ws, ws[::-1] * -1)
if self.lambda_ % 2 != 0:
self.weights = np.append([1 / self.mu], self.weights)
elif self.weights_option == '1/2^mu':
ws = 1 / 2**np.arange(1, self.mu + 1) + (
(1 / (2**self.mu)) / self.mu)
self.weights = np.append(ws, ws[::-1] * -1)
if self.lambda_ % 2 != 0:
self.weights = np.append([1/self.mu**2], self.weights)
else:
self.weights = (np.log((self.lambda_ + 1) / 2) -
@@ -639,6 +639,7 @@ def perform_local_restart(self) -> None:
self.init_adaptation_parameters()
self.init_dynamic_parameters()
self.init_local_restart_parameters()
self.restarts.append(self.t)
else:
warnings.warn("Termination criteria met: {}".format(", ".join(
name for name, value in self.termination_criteria.items() if value
@@ -729,7 +730,6 @@ def save(self, filename:str='parameters.pkl') -> None:
self.sampler = None
pickle.dump(self, f)


def record_statistics(self) -> None:
'Method for recording metadata. '
self.flat_fitnesses.append(
@@ -792,3 +792,37 @@ def calculate_termination_criteria(self) -> None:
)
)
}

def update(self, parameters: dict, reset_default_modules=False):
'''Method to update the values of the Parameters object
based on a given dict of new parameters.
Note that some updated parameters might be overridden by:
self.init_selection_parameters()
self.init_adaptation_parameters()
self.init_local_restart_parameters()
which are called at the end of this function. Use with caution.
Parameters
----------
parameters: dict
A dict with new parameter values
reset_default_modules: bool = False
Whether to reset the modules back to their default values.
'''
if reset_default_modules:
for name in Parameters.__modules__:
default_option, *_ = getattr(getattr(Parameters, name),
"options", [False, True])
setattr(self, name, default_option)

for name, value in parameters.items():
if not hasattr(self, name):
raise ValueError(f"The parameter {name} doesn't exist")
setattr(self, name, value)

self.init_selection_parameters()
self.init_adaptation_parameters()
self.init_local_restart_parameters()
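A short usage sketch of the new update method (illustrative only; the calls mirror the test_updating test added in tests/test_parameters.py below):

from modcma.parameters import Parameters

p = Parameters(5)
p.update(dict(mirrored='mirrored'))  # switch a single module
p.update(dict(active=True))          # other modules keep their current values
# With reset_default_modules=True, every module is first restored to its
# default, so 'mirrored' and 'active' fall back before the update is applied.
p.update(dict(weights_option='1/mu'), reset_default_modules=True)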
21 changes: 17 additions & 4 deletions tests/test_modularcmaes.py
@@ -8,7 +8,7 @@
from modcma import parameters, utils, modularcmaes


class TestModularCMAESMeta(type):
def __new__(classes, name, bases, clsdict):
def gen_test(module, value):
def do_test(self):
@@ -26,9 +26,9 @@ def do_test(self):
clsdict[f"test_standard"] = gen_test('active', True)
return super().__new__(classes, name, bases, clsdict)

class TestModularCMAES(
unittest.TestCase,
metaclass=TestModularCMAESMeta):

_dim = 2
_budget = int(1e2 * _dim)
@@ -61,12 +61,25 @@ def test_local_restart(self):
c.step()


class TestModularCMAESSingle(unittest.TestCase):
def test_str_repr(self):
c = modularcmaes.ModularCMAES(sum, 5)
self.assertIsInstance(str(c), str)
self.assertIsInstance(repr(c), str)

def test_n_generations(self):
c = modularcmaes.ModularCMAES(sum, 5, n_generations = 5)
self.assertEqual(1, len(c.break_conditions))

for i in range(5):
c.step()

self.assertTrue(any(c.break_conditions))

c = modularcmaes.ModularCMAES(sum, 5)
self.assertEqual(2, len(c.break_conditions))


def testtpa_mutation(self):
class TpaParameters:
sigma = .4
24 changes: 24 additions & 0 deletions tests/test_parameters.py
@@ -8,6 +8,7 @@

from modcma.parameters import Parameters
from modcma.utils import AnyOf
from modcma.sampling import mirrored_sampling
from modcma.population import Population


@@ -22,6 +23,29 @@ def try_wrong_types(self, p, name, type_):
with self.assertRaises(TypeError, msg=f"{name} {type_} {x}"):
setattr(p, name, x)


def test_updating(self):
self.p.update(dict(mirrored='mirrored'))
self.assertEqual(self.p.mirrored, 'mirrored')
self.assertEqual(self.p.sampler.__name__, 'mirrored_sampling')

with self.assertRaises(ValueError):
self.p.update(dict(nonexist=10))

self.p.update(dict(active=True))
self.assertEqual(self.p.active, True)
self.assertEqual(self.p.mirrored, 'mirrored')
self.assertEqual(self.p.sampler.__name__, 'mirrored_sampling')

old_mueff = self.p.mueff
self.p.update(dict(weights_option="1/mu"), reset_default_modules=True)
self.assertEqual(self.p.active, False)
self.assertEqual(self.p.mirrored, None)
self.assertEqual(self.p.weights_option, "1/mu")
self.assertNotEqual(self.p.mueff, old_mueff)



def test_bipop_parameters(self):
self.p.local_restart = 'BIPOP'
self.p.used_budget += 11
