
Commit

better comments in preparation for book
thomasWeise committed Apr 23, 2022
1 parent 9c3d443 commit 92603cf
Showing 7 changed files with 88 additions and 65 deletions.
26 changes: 13 additions & 13 deletions moptipy/algorithms/ea1plus1.py
@@ -7,6 +7,7 @@
from moptipy.api.process import Process


+ # start book
class EA1plus1(Algorithm1):
"""
A (1+1)-EA is a simple local search accepting all non-worsening moves.
@@ -22,29 +23,28 @@ def solve(self, process: Process) -> None:
:param process: the process object
"""
- # create records for old and new point in the search space
- best_x = process.create()
- new_x = process.create()
- # obtain the random number generator
+ # Create records for old and new point in the search space.
+ best_x = process.create() # record for best-so-far solution
+ new_x = process.create() # record for new solution
+ # Obtain the random number generator.
random: Final[Generator] = process.get_random()

- # Resolving things such as "process." or "self." costs time.
- # We shovel a lot of function references into local variables
- # to save time.
+ # Put function references in variables to save time.
evaluate: Final[Callable] = process.evaluate
op1: Final[Callable] = self.op1.op1
should_terminate: Final[Callable] = process.should_terminate

# Start at a random point in the search space and evaluate it.
- self.op0.op0(random, best_x) # create one solution randomly
- best_f: Union[int, float] = evaluate(best_x) # and evaluate it
+ self.op0.op0(random, best_x) # Create 1 solution randomly and
+ best_f: Union[int, float] = evaluate(best_x) # evaluate it.

- while not should_terminate(): # until we need to quit...
+ while not should_terminate(): # Until we need to quit...
op1(random, new_x, best_x) # new_x = neighbor of best_x
new_f: Union[int, float] = evaluate(new_x)
- if new_f <= best_f: # new_x is no worse than best_x?
- best_f = new_f # use its objective value
- best_x, new_x = new_x, best_x # swap best and new
+ if new_f <= best_f: # new_x is not worse than best_x?
+ best_f = new_f # Store its objective value.
+ best_x, new_x = new_x, best_x # Swap best and new.
+ # end book

def __str__(self) -> str:
"""
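
The region between the `# start book` and `# end book` markers appears to delimit the snippet that will be excerpted into the book. To see the same accept-if-not-worse loop outside of the moptipy `Process` machinery, here is a minimal stand-alone sketch; the bit-string representation, the one-bit-flip move, the zero-counting toy objective, the `max_fes` budget, and the function name are assumptions made only for this illustration, not part of this commit:

import random
from typing import Callable, List

def ea1plus1_sketch(evaluate: Callable[[List[int]], float],
                    n: int, max_fes: int, seed: int = 1) -> List[int]:
    """A stand-alone (1+1)-EA on bit strings: accept every non-worsening move."""
    rng = random.Random(seed)
    best_x = [rng.randint(0, 1) for _ in range(n)]   # random starting point
    best_f = evaluate(best_x)                        # first objective evaluation
    for _ in range(max_fes - 1):                     # budget counted in evaluations
        new_x = list(best_x)                         # copy the best-so-far solution
        new_x[rng.randrange(n)] ^= 1                 # unary move: flip one random bit
        new_f = evaluate(new_x)
        if new_f <= best_f:                          # not worse? then keep it
            best_x, best_f = new_x, new_f
    return best_x

# toy use: minimize the number of zero bits in a 16-bit string
print(ea1plus1_sketch(lambda x: x.count(0), n=16, max_fes=1000))
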
32 changes: 15 additions & 17 deletions moptipy/algorithms/fea1plus1.py
@@ -8,6 +8,7 @@
from moptipy.api.process import Process


+ # start book
class FEA1plus1(Algorithm1):
"""
The FFA-based version of the (1+1)-EA: the (1+1)-FEA.
@@ -38,39 +39,36 @@ def solve(self, process: Process) -> None:
:param process: the process object
"""
- # create records for old and new point in the search space
- best_x = process.create()
- new_x = process.create()
+ # Create records for old and new point in the search space.
+ best_x = process.create() # record for best-so-far solution
+ new_x = process.create() # record for new solution
lb: Final[int] = cast(int, process.lower_bound())

# h holds the encounter frequency of each objective value.
# By picking 32-bit integers as frequencies, we can do up to
# 4 billion FEs before the frequency fitness becomes unreliable.
h: Final[np.ndarray] = np.zeros(
- cast(int, process.upper_bound()) - lb + 1, np.uint32)
- # obtain the random number generator
+ cast(int, process.upper_bound()) - lb + 1, np.uint64)
+ # Obtain the random number generator.
random: Final[Generator] = process.get_random()

- # Resolving things such as "process." or "self." costs time.
- # We shovel a lot of function references into local variables
- # to save time.
+ # Put function references in variables to save time.
evaluate: Final[Callable] = process.evaluate
op1: Final[Callable] = self.op1.op1
should_terminate: Final[Callable] = process.should_terminate

# Start at a random point in the search space and evaluate it.
- self.op0.op0(random, best_x) # create one solution randomly
- best_f: int = cast(int, evaluate(best_x)) - lb # and evaluate it
+ self.op0.op0(random, best_x) # Create 1 solution randomly and
+ best_f: int = cast(int, evaluate(best_x)) - lb # evaluate it.

- while not should_terminate(): # until we need to quit...
+ while not should_terminate(): # Until we need to quit...
op1(random, new_x, best_x) # new_x = neighbor of best_x
new_f: int = cast(int, evaluate(new_x)) - lb

- h[new_f] = h[new_f] + 1 # increase frequency of new_f
- h[best_f] = best_h = h[best_f] + 1 # increase frequency of best_f
+ h[new_f] = h[new_f] + 1 # Increase frequency of new_f and
+ h[best_f] = best_h = h[best_f] + 1 # of best_f.
if h[new_f] <= best_h: # new_x is no worse than best_x?
- best_f = new_f # use its objective value
- best_x, new_x = new_x, best_x # swap best and new
+ best_f = new_f # Store its objective value.
+ best_x, new_x = new_x, best_x # Swap best and new.
+ # end book

def __str__(self) -> str:
"""
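
In the (1+1)-FEA, acceptance is not based on the objective values themselves but on how often each objective value has been encountered (frequency fitness assignment): the counters of both the current and the new value are incremented, and the new solution is kept if its value is not more frequent. A minimal stand-alone sketch under the same illustrative assumptions as before (bit strings, one-bit flips, an integer objective bounded by `ub`; all names are assumptions for this example only):

import random
from typing import Callable, List

def fea1plus1_sketch(evaluate: Callable[[List[int]], int], n: int, ub: int,
                     max_fes: int, seed: int = 1) -> List[int]:
    """Stand-alone (1+1)-FEA sketch: accept based on objective-value frequency."""
    rng = random.Random(seed)
    h = [0] * (ub + 1)                       # encounter frequency of each objective value
    best_x = [rng.randint(0, 1) for _ in range(n)]
    best_f = evaluate(best_x)
    for _ in range(max_fes - 1):
        new_x = list(best_x)
        new_x[rng.randrange(n)] ^= 1         # flip one random bit
        new_f = evaluate(new_x)
        h[new_f] += 1                        # update the frequency of new_f ...
        h[best_f] += 1                       # ... and of best_f
        if h[new_f] <= h[best_f]:            # new value not more frequent? accept
            best_x, best_f = new_x, new_f
    return best_x

# toy use: minimize the number of zero bits; objective values lie in 0..16
print(fea1plus1_sketch(lambda x: x.count(0), n=16, ub=16, max_fes=1000))
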
24 changes: 12 additions & 12 deletions moptipy/algorithms/hill_climber.py
@@ -7,6 +7,7 @@
from moptipy.api.process import Process


+ # start book
class HillClimber(Algorithm1):
"""
The stochastic hill climbing algorithm only accepts improving moves.
@@ -22,29 +23,28 @@ def solve(self, process: Process) -> None:
:param process: the process object
"""
- # create records for old and new point in the search space
- best_x = process.create()
- new_x = process.create()
- # obtain the random number generator
+ # Create records for old and new point in the search space.
+ best_x = process.create() # record for best-so-far solution
+ new_x = process.create() # record for new solution
+ # Obtain the random number generator.
random: Final[Generator] = process.get_random()

- # Resolving things such as "process." or "self." costs time.
- # We shovel a lot of function references into local variables
- # to save time.
+ # Put function references in variables to save time.
evaluate: Final[Callable] = process.evaluate
op1: Final[Callable] = self.op1.op1
should_terminate: Final[Callable] = process.should_terminate

# Start at a random point in the search space and evaluate it.
- self.op0.op0(random, best_x) # create one solution randomly
- best_f: Union[int, float] = evaluate(best_x) # and evaluate it
+ self.op0.op0(random, best_x) # Create 1 solution randomly and
+ best_f: Union[int, float] = evaluate(best_x) # evaluate it.

- while not should_terminate(): # until we need to quit...
+ while not should_terminate(): # Until we need to quit...
op1(random, new_x, best_x) # new_x = neighbor of best_x
new_f: Union[int, float] = evaluate(new_x)
if new_f < best_f: # new_x is _better_ than best_x?
- best_f = new_f # use its objective value
- best_x, new_x = new_x, best_x # swap best and new
+ best_f = new_f # Store its objective value.
+ best_x, new_x = new_x, best_x # Swap best and new.
+ # end book

def __str__(self) -> str:
"""
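
Functionally, the hill climber differs from the (1+1)-EA above only in the acceptance test: it uses a strict `<`, so moves to equally good solutions are rejected and the search cannot drift across plateaus of equal objective value. In the stand-alone sketch (same illustrative toy setup as before), only one line changes:

import random
from typing import Callable, List

def hill_climber_sketch(evaluate: Callable[[List[int]], float],
                        n: int, max_fes: int, seed: int = 1) -> List[int]:
    """Stand-alone stochastic hill climber: accept only strictly improving moves."""
    rng = random.Random(seed)
    best_x = [rng.randint(0, 1) for _ in range(n)]
    best_f = evaluate(best_x)
    for _ in range(max_fes - 1):
        new_x = list(best_x)
        new_x[rng.randrange(n)] ^= 1      # flip one random bit
        new_f = evaluate(new_x)
        if new_f < best_f:                # strictly better? (the (1+1)-EA uses <= here)
            best_x, best_f = new_x, new_f
    return best_x
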
12 changes: 6 additions & 6 deletions moptipy/algorithms/random_sampling.py
@@ -7,6 +7,7 @@
from moptipy.api.process import Process


+ # start book
class RandomSampling(Algorithm0):
"""In each step, random sampling creates a new, random solution."""

@@ -20,16 +21,15 @@ def solve(self, process: Process) -> None:
# obtain the random number generator
random: Final[Generator] = process.get_random()

- # Resolving things such as "process." or "self." costs time.
- # We shovel a lot of function references into local variables
- # to save time.
+ # Put function references in variables to save time.
evaluate: Final[Callable] = process.evaluate
op0: Final[Callable] = self.op0.op0
should_terminate: Final[Callable] = process.should_terminate

- while not should_terminate(): # until we need to quit...
- op0(random, x) # sample a random solution
- evaluate(x) # evaluate its quality... but ignore this info
+ while not should_terminate(): # Until we need to quit...
+ op0(random, x) # Sample a completely random solution.
+ evaluate(x) # Evaluate solution ... but ignore result.
+ # end book

def __str__(self) -> str:
"""
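
In the class above, the return value of `evaluate` can be ignored because the moptipy `Process` keeps track of the best solution encountered so far. A stand-alone sketch without that machinery has to remember the best sample itself; the bit-string setup, objective, and names below are again purely illustrative assumptions:

import random
from typing import Callable, List

def random_sampling_sketch(evaluate: Callable[[List[int]], float],
                           n: int, max_fes: int, seed: int = 1) -> List[int]:
    """Stand-alone random sampling: keep drawing random solutions, remember the best."""
    rng = random.Random(seed)
    best_x = [rng.randint(0, 1) for _ in range(n)]  # first completely random sample
    best_f = evaluate(best_x)
    for _ in range(max_fes - 1):
        x = [rng.randint(0, 1) for _ in range(n)]   # another completely random sample
        f = evaluate(x)
        if f < best_f:                              # outside moptipy we track the best ourselves
            best_x, best_f = x, f
    return best_x

print(random_sampling_sketch(lambda x: x.count(0), n=16, max_fes=1000))
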
22 changes: 11 additions & 11 deletions moptipy/algorithms/random_walk.py
@@ -20,6 +20,7 @@
from moptipy.api.process import Process


+ # start book
class RandomWalk(Algorithm1):
"""
Perform a random walk through the search space.
@@ -35,26 +36,25 @@ def solve(self, process: Process) -> None:
:param process: the process object
"""
- # create records for old and new point in the search space
- old_x = process.create()
- new_x = process.create()
- # obtain the random number generator
+ old_x = process.create() # record for best-so-far solution
+ new_x = process.create() # record for new solution
+ # Obtain the random number generator.
random: Final[Generator] = process.get_random()

- # Resolving things such as "process." or "self." costs time.
- # We shovel a lot of function references into local variables
- # to save time.
+ # Put function references in variables to save time.
evaluate: Final[Callable] = process.evaluate
op1: Final[Callable] = self.op1.op1
should_terminate: Final[Callable] = process.should_terminate

# Start at a random point in the search space and evaluate it.
- self.op0.op0(random, new_x) # create one solution randomly
- evaluate(new_x) # and evaluate it
+ self.op0.op0(random, new_x) # Create one solution randomly
+ evaluate(new_x) # and evaluate it.

- while not should_terminate(): # until we need to quit...
- old_x, new_x = new_x, old_x # swap old and new solution
+ while not should_terminate(): # Until we need to quit...
+ old_x, new_x = new_x, old_x # Swap old and new solution.
op1(random, new_x, old_x) # new_x = neighbor of old_x
- evaluate(new_x) # evaluate the solution, ignore result
+ evaluate(new_x) # Evaluate the solution and ignore result.
+ # end book

def __str__(self) -> str:
"""
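
A random walk always moves to the newly generated neighbor, no matter whether it is better or worse; evaluating the neighbor only serves to record what the walk has seen (in moptipy, the `Process` remembers the best point visited). A stand-alone sketch under the same illustrative assumptions as the earlier examples:

import random
from typing import Callable, List

def random_walk_sketch(evaluate: Callable[[List[int]], float],
                       n: int, max_fes: int, seed: int = 1) -> List[int]:
    """Stand-alone random walk: always accept the neighbor, never step back."""
    rng = random.Random(seed)
    x = [rng.randint(0, 1) for _ in range(n)]  # random starting point
    evaluate(x)
    for _ in range(max_fes - 1):
        x[rng.randrange(n)] ^= 1  # move to a neighbor by flipping one random bit
        evaluate(x)               # evaluate the new point, but never reject it
    return x                      # the final point of the walk, not necessarily the best
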
35 changes: 31 additions & 4 deletions moptipy/algorithms/single_random_sample.py
@@ -5,18 +5,45 @@
from moptipy.api.process import Process


+ # start book
class SingleRandomSample(Algorithm0):
"""This algorithm creates one single random solution."""
"""
This algorithm creates one single random solution.
The single random sample algorithm applies the nullary search
operator to sample exactly one single random solution.
It then evaluates the solution by passing it to
:meth:`~moptipy.api.process.Process.evaluate`. It does nothing
else.
This is a very very bad optimization algorithm. We only use it in
our book to illustrate one basic concept for solving optimization
problems: The generation of random solutions. The single-random
sampling algorithm here is actually very wasteful: since it only
generates exactly one single solution, it does not use its
computational budget well. Even if you grant it 10'000 years, it
will still only generate one solution. Even if it could generate
and test thousands or millions of solutions, it will not do it.
Nevertheless, after applying this "algorithm," you will have one
valid solution remembered in the optimization process (embodied as
instance `process` of :class:`~moptipy.api.process.Process`).
This concept of random sampling is then refined in the
:class:`~moptipy.algorithms.random_sampling.RandomSampling`
algorithm, which repeats generating random solutions until its
allotted runtime is exhausted.
"""

def solve(self, process: Process) -> None:
"""
Apply the single random sampling approach.
:param process: the process object
"""
- x: Final = process.create() # create the solution record
- self.op0.op0(process.get_random(), x) # randomize contents
- process.evaluate(x) # evaluate quality
+ x: Final = process.create() # Create the solution record.
+ self.op0.op0(process.get_random(), x) # Create random solution
+ process.evaluate(x) # Evaluate that random solution.
+ # end book

def __str__(self) -> str:
"""
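
As the new docstring explains, this algorithm draws exactly one sample and then stops, leaving the rest of its budget unused; the `Process` still remembers that one solution and its quality. Reduced to a stand-alone sketch (toy bit-string setup as before, all names illustrative):

import random
from typing import Callable, List

def single_random_sample_sketch(evaluate: Callable[[List[int]], float],
                                n: int, seed: int = 1) -> List[int]:
    """Stand-alone sketch: create one single random solution, evaluate it, stop."""
    rng = random.Random(seed)
    x = [rng.randint(0, 1) for _ in range(n)]  # the one and only sample
    evaluate(x)  # in moptipy, the Process would remember x and its objective value
    return x     # no loop: any remaining budget is simply left unused
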
2 changes: 0 additions & 2 deletions moptipy/examples/jssp/experiment.py
@@ -6,7 +6,6 @@

import moptipy.api.experiment as ex
from moptipy.algorithms.ea1plus1 import EA1plus1
- from moptipy.algorithms.fea1plus1 import FEA1plus1
from moptipy.algorithms.hill_climber import HillClimber
from moptipy.algorithms.random_sampling import RandomSampling
from moptipy.algorithms.random_walk import RandomWalk
@@ -49,7 +48,6 @@
lambda inst, pwr: RandomSampling(Op0Shuffle(pwr)), # random sampling
lambda inst, pwr: HillClimber(Op0Shuffle(pwr), Op1Swap2()), # hill climb.
lambda inst, pwr: EA1plus1(Op0Shuffle(pwr), Op1Swap2()), # (1+1)-EA
- lambda inst, pwr: FEA1plus1(Op0Shuffle(pwr), Op1Swap2()), # (1+1)-FEA
lambda inst, pwr: RandomWalk(Op0Shuffle(pwr), Op1Swap2()) # random walk
)

