From 1e7fb0e92b617c51f82636a443b6479e3e4aae0d Mon Sep 17 00:00:00 2001
From: Ishaan Desai
Date: Wed, 25 Jan 2023 14:25:35 +0100
Subject: [PATCH 01/87] Add local variant of adaptive control strategy (#11)

* [WIP] first part of implementation of adaptivity for scalar quantities
* [WIP] more code for serial variant of adaptivity
* [WIP] Further implementation of adaptivity
* [WIP] Further code to handle checkpointing of adaptivity
* Formatting
* [WIP] Corrections to adaptivity code to get it to work with solver dummy
* [WIP] Corrections to adaptivity implementation
* Modifying data from micro dummy to ensure that not same data is transferred all the time
* Corrections in adaptivity strategy
* Add adaptivity to preCICE export output and add integration test for adaptivity (WIP)
* Code corrections
* Formatting
* Formatting
* Formatting
* New strategy to avoid amending arrays which are being looped from inside of the loop
* [WIP] Moving adaptivity functionality to a different class called AdaptiveController
* [WIP] correcting mistake in similarity distance calculation
* [WIP] close to working solution, solving active sims more smartly
* Formatting
* Resolving several errors in adaptivity code and moving to serial-explicit scheme for the adaptivity test
* Working version of adaptivity test case
* Fix solverdummy
* [WORKING] First working version of adaptivity
* Add adaptivity test to GitHub Actions CI
* Formatting
* Trying to get the GitHub Action for adaptivity to work
* [WIP] Adding parallel variant of adaptivity
* Move to using local data sets for adaptivity calculation
* Add docstrings to adaptivity related functions and adjust integration test
* Removing reference to a function which does not exist
* Remove return type hint of type tuple
---
 .github/workflows/run-adaptivity-test.yml     |  28 ++
 .github/workflows/run-macro-micro-dummy.yml   |   2 -
 .gitignore                                    |   1 +
 README.md                                     |  21 +-
 .../micro-manager-config.json                 |   3 +-
 examples/macro-micro-dummy/micro_dummy.py     |   4 +-
 micro_manager/adaptivity.py                   | 171 ++++++++++
 micro_manager/config.py                       |  67 ++++
 micro_manager/micro_manager.py                | 317 +++++++++++++-----
 .../integration/test_adaptivity/clean-test.sh |   9 +
 .../test_adaptivity/macro_solver.py           | 126 +++++++
 .../test_adaptivity/micro-manager-config.json |  20 ++
 .../test_adaptivity/micro_solver.py           |  35 ++
 .../test_adaptivity/precice-config.xml        |  59 ++++
 .../test_adaptivity/run_micro_manager.py      |  11 +
 15 files changed, 779 insertions(+), 95 deletions(-)
 create mode 100644 .github/workflows/run-adaptivity-test.yml
 create mode 100644 micro_manager/adaptivity.py
 create mode 100755 tests/integration/test_adaptivity/clean-test.sh
 create mode 100644 tests/integration/test_adaptivity/macro_solver.py
 create mode 100644 tests/integration/test_adaptivity/micro-manager-config.json
 create mode 100644 tests/integration/test_adaptivity/micro_solver.py
 create mode 100644 tests/integration/test_adaptivity/precice-config.xml
 create mode 100644 tests/integration/test_adaptivity/run_micro_manager.py

diff --git a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml
new file mode 100644
index 00000000..2b012902
--- /dev/null
+++ b/.github/workflows/run-adaptivity-test.yml
@@ -0,0 +1,28 @@
+name: Run integration test for adaptivity
+on:
+  push:
+    branches:
+      - "*"
+  pull_request:
+    branches:
+      - "*"
+jobs:
+  run_test_adaptivity:
+    name: Test adaptivity
+    runs-on: ubuntu-latest
+    container: precice/precice
+    steps:
+      - name: Checkout Repository
+        uses: actions/checkout@v2
+      - name: Install Dependencies
+        run: |
+          apt-get -qq update
+          apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config
+          python -m pip install --upgrade pip
+          pip install setuptools wheel twine
+      - name: Install micro-manager
+        run: pip3 install --user .
+      - name: Run adaptivity test
+        run: |
+          cd tests/integration/test_adaptivity/
+          python3 macro_solver.py & python3 run_micro_manager.py
diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml
index f852f5c6..6a16a6ad 100644
--- a/.github/workflows/run-macro-micro-dummy.yml
+++ b/.github/workflows/run-macro-micro-dummy.yml
@@ -20,8 +20,6 @@ jobs:
         apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config
         python -m pip install --upgrade pip
         pip install setuptools wheel twine
-    - name: Install pyprecice
-      run: pip3 install --user pyprecice
     - name: Install micro-manager
       run: pip3 install --user .
     - name: Run macro-micro dummy
diff --git a/.gitignore b/.gitignore
index 7dfa6f97..e9ecfc7a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@ __pycache__
 
 # Building artifacts
 build
+micro_manager_precice.egg-info/
 micro_manager.egg-info/
 
 # Packaging artifacts
diff --git a/README.md b/README.md
index db293d53..e0a14ec0 100644
--- a/README.md
+++ b/README.md
@@ -137,9 +137,28 @@ The Micro Manager is capable of generating diagnostics type output of the micro
 * `data_from_micro_sims`: A Python dictionary with the names of the data from the micro simulation to be written to VTK files as keys and `"scalar"` or `"vector"` as values.
 * `output_micro_sim_solve_time`: When `True`, the Manager writes the wall clock time of the `solve()` function of each micro simulation to the VTK output.
 
+The Micro Manager can adaptively initialize micro simulations. The adaptivity strategy is taken from two publications:
+
+1. Redeker, Magnus & Eck, Christof. (2013). A fast and accurate adaptive solution strategy for two-scale models with continuous inter-scale dependencies. Journal of Computational Physics. 240. 268-283. 10.1016/j.jcp.2012.12.025.
+
+2. Bastidas, Manuela & Bringedal, Carina & Pop, Iuliu. (2021). A two-scale iterative scheme for a phase-field model for precipitation and dissolution in porous media. Applied Mathematics and Computation. 396. 125933. 10.1016/j.amc.2020.125933.
+
+To turn on adaptivity, the following options need to be set in `simulation_params`:
+
+* `adaptivity`: Set as `True`.
+* `adaptivity_data`: List of names of data which are to be used to calculate if two micro-simulations are similar or not. For example `["macro-scalar-data", "macro-vector-data"]`
+* `adaptivity_history_param`: History parameter $\Lambda$, set as $\Lambda >= 0$.
+* `adaptivity_coarsening_constant`: Coarsening constant $C_c$, set as $C_c < 1$.
+* `adaptivity_refining_constant`: Refining constant $C_r$, set as $C_r >= 0$.
+
+All variables names are chosen to be same as the second publication mentioned above.
+
 #### Changes to preCICE configuration file
 
-The Micro Manager relies on the [export functionality](https://precice.org/configuration-export.html#enabling-exporters) of preCICE to write diagnostics data output. If the option `diagnotics: data_from_micro_sims` is configured, the corresponding export tag also needs to be set in the preCICE XML configuration script.
+The Micro Manager relies on the [export functionality](https://precice.org/configuration-export.html#enabling-exporters) of preCICE to write diagnostics data output.
+
+* If the option `diagnotics: data_from_micro_sims` is configured, the corresponding export tag also needs to be set in the preCICE XML configuration script.
+* If adaptivity is turned on, the Micro Manager will attempt to write a scalar data set `active_state` to preCICE. Add this data set to the preCICE configuration file.
 
 ### Running the Micro Manager
 
diff --git a/examples/macro-micro-dummy/micro-manager-config.json b/examples/macro-micro-dummy/micro-manager-config.json
index a2709c4d..390c2f2b 100644
--- a/examples/macro-micro-dummy/micro-manager-config.json
+++ b/examples/macro-micro-dummy/micro-manager-config.json
@@ -7,7 +7,8 @@
         "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"}
     },
     "simulation_params": {
-        "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0]
+        "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0],
+        "adaptivity": "False"
     },
     "diagnostics": {
         "output_micro_sim_solve_time": "True"
diff --git a/examples/macro-micro-dummy/micro_dummy.py b/examples/macro-micro-dummy/micro_dummy.py
index 08fd56b9..f3f76270 100644
--- a/examples/macro-micro-dummy/micro_dummy.py
+++ b/examples/macro-micro-dummy/micro_dummy.py
@@ -26,9 +26,9 @@ def solve(self, macro_data, dt):
         print("Solve timestep of micro problem ({})".format(self._sim_id))
         assert dt != 0
         self._micro_vector_data = []
-        self._micro_scalar_data = macro_data["macro-scalar-data"]
+        self._micro_scalar_data = macro_data["macro-scalar-data"] + 1
         for d in range(self._dims):
-            self._micro_vector_data.append(macro_data["macro-vector-data"][d])
+            self._micro_vector_data.append(macro_data["macro-vector-data"][d] + 1)
 
         return {"micro-scalar-data": self._micro_scalar_data.copy(),
                 "micro-vector-data": self._micro_vector_data.copy()}
diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py
new file mode 100644
index 00000000..0f43f99d
--- /dev/null
+++ b/micro_manager/adaptivity.py
@@ -0,0 +1,171 @@
+"""
+Functionality for adaptive initialization and control of micro simulations
+"""
+import numpy as np
+import sys
+
+
+class AdaptiveController:
+    def __init__(self, configurator) -> None:
+        # Names of data to be used for adaptivity computation
+        self._refine_const = configurator.get_adaptivity_refining_const()
+        self._coarse_const = configurator.get_adaptivity_coarsening_const()
+        self._number_of_sims = None
+
+    def set_number_of_sims(self, number_of_sims) -> None:
+        self._number_of_sims = number_of_sims
+
+    def get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: np.ndarray) -> np.ndarray:
+        """
+        Calculate metric which determines if two micro simulations are similar enough to have one of them deactivated.
+ + Parameters + ---------- + dt : float + Timestep + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + data : numpy array + Data to be used in similarity distance calculation + + Returns + ------- + similarity_dists : numpy array + Updated 2D array having similarity distances between each micro simulation pair + """ + _similarity_dists = np.copy(similarity_dists) + + if data.ndim == 1: + dim = 0 + elif data.ndim == 2: + _, dim = data.shape + + counter_1 = 0 + for id_1 in range(self._number_of_sims): + counter_2 = 0 + for id_2 in range(self._number_of_sims): + data_diff = 0 + if id_1 != id_2: + if dim: + for d in range(dim): + data_diff += abs(data[counter_1, d] - data[counter_2, d]) + else: + data_diff = abs(data[counter_1] - data[counter_2]) + + _similarity_dists[id_1, id_2] += dt * data_diff + else: + _similarity_dists[id_1, id_2] = 0 + counter_2 += 1 + counter_1 += 1 + + return _similarity_dists + + def update_active_micro_sims( + self, + similarity_dists: np.ndarray, + micro_sim_states: np.ndarray, + micro_sims: list) -> np.ndarray: + """ + Update set of active micro simulations. Active micro simulations are compared to each other and if found similar, one of them is deactivated. + + Parameters + ---------- + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + micro_sim_states : numpy array + 1D array having state (active or inactive) of each micro simulation + micro_sims : list + List of objects of class MicroProblem, which are the micro simulations + + Returns + ------- + _micro_sim_states : numpy array + Updated 1D array having state (active or inactive) of each micro simulation + """ + coarse_tol = self._coarse_const * self._refine_const * np.amax(similarity_dists) + + _micro_sim_states = np.copy(micro_sim_states) + + # Update the set of active micro sims + for id_1 in range(self._number_of_sims): + if _micro_sim_states[id_1]: # if id_1 sim is active + for id_2 in range(self._number_of_sims): + if _micro_sim_states[id_2]: # if id_2 is active + if id_1 != id_2: # don't compare active sim to itself + # If active sim is similar to another active sim, + # deactivate it + if similarity_dists[id_1, id_2] < coarse_tol: + micro_sims[id_1].deactivate() + _micro_sim_states[id_1] = 0 + break + + return _micro_sim_states + + def update_inactive_micro_sims( + self, + similarity_dists: np.ndarray, + micro_sim_states: np.ndarray, + micro_sims: list) -> np.ndarray: + """ + Update set of inactive micro simulations. Each inactive micro simulation is compared to all active ones and if it is not similar to any of them, it is activated. 
+ + Parameters + ---------- + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + micro_sim_states : numpy array + 1D array having state (active or inactive) of each micro simulation + micro_sims : list + List of objects of class MicroProblem, which are the micro simulations + + Returns + ------- + _micro_sim_states : numpy array + Updated 1D array having state (active or inactive) of each micro simulation + """ + ref_tol = self._refine_const * np.amax(similarity_dists) + + _micro_sim_states = np.copy(micro_sim_states) + + if not np.any(_micro_sim_states): + _micro_sim_states[0] = 1 # If all sims are inactive, activate the first one (a random choice) + + # Update the set of inactive micro sims + for id_1 in range(self._number_of_sims): + dists = [] + if not _micro_sim_states[id_1]: # if id_1 is inactive + for id_2 in range(self._number_of_sims): + if _micro_sim_states[id_2]: # if id_2 is active + dists.append(similarity_dists[id_1, id_2]) + # If inactive sim is not similar to any active sim, activate it + if min(dists) > ref_tol: + micro_sims[id_1].activate() + _micro_sim_states[id_1] = 1 + + return _micro_sim_states + + def associate_inactive_to_active(self, similarity_dists, micro_sim_states, micro_sims): + """ + Associate inactive micro simulations to most similar active micro simulation. + + Parameters + ---------- + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + micro_sim_states : numpy array + 1D array having state (active or inactive) of each micro simulation + micro_sims : list + List of objects of class MicroProblem, which are the micro simulations + """ + active_sim_ids = np.where(micro_sim_states == 1)[0] + inactive_sim_ids = np.where(micro_sim_states == 0)[0] + + # Associate inactive micro sims to active micro sims + for id_1 in inactive_sim_ids: + dist_min = sys.float_info.max + for id_2 in active_sim_ids: + # Find most similar active sim for every inactive sim + if similarity_dists[id_1, id_2] < dist_min: + micro_id = id_2 + dist_min = similarity_dists[id_1, id_2] + micro_sims[id_1].is_most_similar_to(micro_id) diff --git a/micro_manager/config.py b/micro_manager/config.py index 3b207ffc..13a6e5a8 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -35,6 +35,12 @@ def __init__(self, config_filename): self._output_micro_sim_time = False + self._adaptivity = False + self._data_for_adaptivity = dict() + self._adaptivity_history_param = 0.5 + self._adaptivity_coarsening_constant = 0.5 + self._adaptivity_refining_constant = 0.5 + self.read_json(config_filename) def read_json(self, config_filename): @@ -97,6 +103,22 @@ def read_json(self, config_filename): print("Output interval of micro simulations not specified, if output is available then it will be called " "in every time window.") + try: + self._adaptivity = data["simulation_params"]["adaptivity"] + + exchange_data = {**self._read_data_names, **self._write_data_names} + for dname in data["simulation_params"]["adaptivity_data"]: + self._data_for_adaptivity[dname] = exchange_data[dname] + + self._adaptivity_history_param = data["simulation_params"]["adaptivity_history_param"] + self._adaptivity_coarsening_constant = data["simulation_params"]["adaptivity_coarsening_constant"] + self._adaptivity_refining_constant = data["simulation_params"]["adaptivity_refining_constant"] + + self._write_data_names["active_state"] = False + except BaseException: + print("Micro Manager will not adaptively run micro 
simulations, but instead will run all micro simulations " + "in all time steps.") + try: diagnostics_data_names = data["diagnostics"]["data_from_micro_sims"] assert isinstance(diagnostics_data_names, dict), "Diagnostics data is not a dictionary" @@ -212,3 +234,48 @@ def write_micro_solve_time(self): True if micro simulation solve time is required. """ return self._output_micro_sim_time + + def turn_on_adaptivity(self): + """ + + Returns + ------- + + """ + return self._adaptivity + + def get_data_for_adaptivity(self): + """ + + Returns + ------- + + """ + return self._data_for_adaptivity + + def get_adaptivity_hist_param(self): + """ + + Returns + ------- + + """ + return self._adaptivity_history_param + + def get_adaptivity_coarsening_const(self): + """ + + Returns + ------- + + """ + return self._adaptivity_coarsening_constant + + def get_adaptivity_refining_const(self): + """ + + Returns + ------- + + """ + return self._adaptivity_refining_constant diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 9580119f..db64cb5a 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -1,45 +1,51 @@ #!/usr/bin/env python3 """ -Micro manager to organize many micro simulations and couple them via preCICE to a macro simulation +Micro Manager: a tool to initialize and adaptively control micro simulations and couple them via preCICE to a macro simulation """ import argparse import os import sys import precice -from .config import Config from mpi4py import MPI -from math import sqrt +from math import sqrt, exp import numpy as np -from functools import reduce -from operator import iconcat -import hashlib import logging import time +from .config import Config +from .adaptivity import AdaptiveController + sys.path.append(os.getcwd()) def create_micro_problem_class(base_micro_simulation): """ Creates a class MicroProblem which inherits from the class of the micro simulation. + Parameters ---------- base_micro_simulation : class The base class from the micro simulation script. + Returns ------- MicroProblem : class Definition of class MicroProblem defined in this function. """ class MicroProblem(base_micro_simulation): - def __init__(self, micro_sim_id): - base_micro_simulation.__init__(self, micro_sim_id) - self._id = micro_sim_id + def __init__(self, local_id, global_id): + base_micro_simulation.__init__(self, local_id) + self._local_id = local_id + self._global_id = global_id self._is_active = False + self._most_similar_active_local_id = 0 + + def get_local_id(self): + return self._local_id - def get_id(self): - return self._id + def get_global_id(self): + return self._global_id def activate(self): self._is_active = True @@ -47,11 +53,24 @@ def activate(self): def deactivate(self): self._is_active = False + def is_most_similar_to(self, similar_active_local_id): + assert self._is_active is False, "Micro simulation {} is active and hence cannot be most similar to another active simulation".format( + self._global_id) + self._most_similar_active_id = similar_active_local_id + + def get_most_similar_active_id(self): + assert self._is_active is False, "Micro simulation {} is active and hence cannot have a most similar active id".format( + self._global_id) + return self._most_similar_active_local_id + + def is_active(self): + return self._is_active + return MicroProblem class MicroManager: - def __init__(self, config_file): + def __init__(self, config_file) -> None: """ Constructor of MicroManager class. 
@@ -66,8 +85,11 @@ def __init__(self, config_file): self._logger = logging.getLogger(__name__) self._logger.setLevel(level=logging.INFO) - fh = logging.FileHandler('micro-manager.log') # Create file handler which logs messages + + # Create file handler which logs messages + fh = logging.FileHandler('micro-manager.log') fh.setLevel(logging.INFO) + # Create formatter and add it to handlers formatter = logging.Formatter('[' + str(self._rank) + '] %(name)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) @@ -99,19 +121,39 @@ def __init__(self, config_file): for name in self._read_data_names.keys(): self._read_data_ids[name] = self._interface.get_data_id(name, self._macro_mesh_id) + self._data_used_for_adaptivity = dict() + self._macro_bounds = config.get_macro_domain_bounds() self._is_micro_solve_time_required = config.write_micro_solve_time() - self._number_of_micro_simulations = None + self._local_number_of_micro_sims = None + self._global_number_of_micro_sims = None self._is_rank_empty = False - self._micro_sims = None + self._micro_sims = None # Array carrying micro simulation objects self._dt = None - self._mesh_vertex_ids = None + self._mesh_vertex_ids = None # IDs of macro vertices as set by preCICE self._micro_n_out = config.get_micro_output_n() - def decompose_macro_domain(self, macro_bounds): + self._is_adaptivity_on = config.turn_on_adaptivity() + + if self._is_adaptivity_on: + self._adaptivity_controller = AdaptiveController(config) + self._hist_param = config.get_adaptivity_hist_param() + self._adaptivity_data_names = config.get_data_for_adaptivity() + + # Names of macro data to be used for adaptivity computation + self._adaptivity_macro_data_names = dict() + # Names of micro data to be used for adaptivity computation + self._adaptivity_micro_data_names = dict() + for name, is_data_vector in self._adaptivity_data_names.items(): + if name in self._read_data_names: + self._adaptivity_macro_data_names[name] = is_data_vector + if name in self._write_data_names: + self._adaptivity_micro_data_names[name] = is_data_vector + + def decompose_macro_domain(self, macro_bounds) -> list: """ - Decompose the macro domain equally among all ranks, if the Micro Manager is run in paralle. + Decompose the macro domain equally among all ranks, if the Micro Manager is run in parallel. Parameters ---------- @@ -142,27 +184,27 @@ def decompose_macro_domain(self, macro_bounds): if self._interface.get_dimensions() == 2: mesh_bounds = [local_xmin, local_xmin + dx, local_ymin, local_ymin + dy] elif self._interface.get_dimensions() == 3: - # TODO: Domain needs to be decomposed optimally in the Z direction too - mesh_bounds = [local_xmin, local_xmin + dx, local_ymin, local_ymin + dy, macro_bounds[4], - macro_bounds[5]] + # TODO: Domain needs to be decomposed optimally in the Z direction + # too + mesh_bounds = [local_xmin, local_xmin + dx, local_ymin, local_ymin + dy, macro_bounds[4], macro_bounds[5]] self._logger.info("Bounding box limits are {}".format(mesh_bounds)) return mesh_bounds - def initialize(self): + def initialize(self) -> None: """ This function does the following things: - - If the Micro Manager has been executed in parallel, it decomposes the domain as equally as possible. + - If the Micro Manager has been executed in parallel, it decomposes the domain as uniformly as possible. - Initializes preCICE. - - Get the macro mesh information. - - Creates all micro simulation objects and initializes them if the an initialization procedure is available. 
+ - Gets the macro mesh information from preCICE. + - Creates all micro simulation objects and initializes them if an initialization procedure is available. - Writes initial data to preCICE. """ - # Decompose the macro-domain and set the mesh access region for each partition in preCICE - assert len( - self._macro_bounds) / 2 == self._interface.get_dimensions(), "Provided macro mesh bounds are of " \ - "incorrect dimension" + # Decompose the macro-domain and set the mesh access region for each + # partition in preCICE + assert len(self._macro_bounds) / \ + 2 == self._interface.get_dimensions(), "Provided macro mesh bounds are of incorrect dimension" if self._is_parallel: coupling_mesh_bounds = self.decompose_macro_domain(self._macro_bounds) else: @@ -174,21 +216,35 @@ def initialize(self): self._dt = self._interface.initialize() self._mesh_vertex_ids, mesh_vertex_coords = self._interface.get_mesh_vertices_and_ids(self._macro_mesh_id) - self._number_of_micro_simulations, _ = mesh_vertex_coords.shape - self._logger.info("Number of micro simulations = {}".format(self._number_of_micro_simulations)) + self._local_number_of_micro_sims, _ = mesh_vertex_coords.shape + self._logger.info("Number of local micro simulations = {}".format(self._local_number_of_micro_sims)) + + for name, is_data_vector in self._adaptivity_data_names.items(): + if is_data_vector: + self._data_used_for_adaptivity[name] = np.zeros( + (self._local_number_of_micro_sims, self._interface.get_dimensions())) + else: + self._data_used_for_adaptivity[name] = np.zeros((self._local_number_of_micro_sims)) - if self._number_of_micro_simulations == 0: + if self._local_number_of_micro_sims == 0: if self._is_parallel: - self._logger.info("Rank {} has no micro simulations and hence will not do any computation.".format( - self._rank)) + self._logger.info( + "Rank {} has no micro simulations and hence will not do any computation.".format( + self._rank)) self._is_rank_empty = True else: raise Exception("Micro Manager has no micro simulations.") nms_all_ranks = np.zeros(self._size, dtype=np.int64) # Gather number of micro simulations that each rank has, because this rank needs to know how many micro - # simulations have been created by previous ranks, so that it can set the correct IDs - self._comm.Allgather(np.array(self._number_of_micro_simulations), nms_all_ranks) + # simulations have been created by previous ranks, so that it can set + # the correct global IDs + self._comm.Allgather(np.array(self._local_number_of_micro_sims), nms_all_ranks) + + # Get global number of micro simulations + self._global_number_of_micro_sims = np.sum(nms_all_ranks) + + self._adaptivity_controller.set_number_of_sims(self._local_number_of_micro_sims) # Create all micro simulations sim_id = 0 @@ -197,59 +253,53 @@ def initialize(self): sim_id += nms_all_ranks[i] self._micro_sims = [] - for _ in range(self._number_of_micro_simulations): - self._micro_sims.append(create_micro_problem_class(self._micro_problem)(sim_id)) + self._micro_sim_global_ids = [] + for i in range(self._local_number_of_micro_sims): + self._micro_sims.append(create_micro_problem_class(self._micro_problem)(i, sim_id)) + self._micro_sim_global_ids.append(sim_id) sim_id += 1 - write_data = dict() - for name in self._write_data_names.keys(): - write_data[name] = [] + micro_sims_output = list(range(self._local_number_of_micro_sims)) - # Initialize all micro simulations + # Initialize micro simulations if initialize() method exists if hasattr(self._micro_problem, 'initialize') and 
callable(getattr(self._micro_problem, 'initialize')): - for micro_sim in self._micro_sims: - micro_sims_output = micro_sim.initialize() - if micro_sims_output is not None: + for i in range(self._local_number_of_micro_sims): + micro_sims_output[i] = self._micro_sims[i].initialize() + if micro_sims_output[i] is not None: if self._is_micro_solve_time_required: - micro_sims_output["micro_sim_time"] = 0.0 - - for data_name, data in micro_sims_output.items(): - write_data[data_name].append(data) + micro_sims_output[i]["micro_sim_time"] = 0.0 + if self._is_adaptivity_on: + micro_sims_output[i]["active_state"] = 0 else: + micro_sims_output[i] = dict() for name, is_data_vector in self._write_data_names.items(): if is_data_vector: - write_data[name].append(np.zeros(self._interface.get_dimensions())) + micro_sims_output[i][name] = np.zeros(self._interface.get_dimensions()) else: - write_data[name].append(0.0) + micro_sims_output[i][name] = 0.0 - self._logger.info("Micro simulations {} - {} initialized.".format(self._micro_sims[0].get_id(), - self._micro_sims[-1].get_id())) + self._logger.info("Micro simulations with global IDs {} - {} initialized.".format( + self._micro_sim_global_ids[0], self._micro_sim_global_ids[-1])) self._micro_sims_have_output = False if hasattr(self._micro_problem, 'output') and callable(getattr(self._micro_problem, 'output')): self._micro_sims_have_output = True - # Initialize coupling data + # Write initial data if required if self._interface.is_action_required(precice.action_write_initial_data()): - for dname, dim in self._write_data_names.items(): - if dim == 1: - self._interface.write_block_vector_data(self._write_data_ids[dname], self._mesh_vertex_ids, - write_data[dname]) - elif dim == 0: - self._interface.write_block_scalar_data(self._write_data_ids[dname], self._mesh_vertex_ids, - write_data[dname]) + self.write_data_to_precice(micro_sims_output) self._interface.mark_action_fulfilled(precice.action_write_initial_data()) self._interface.initialize_data() - def read_data_from_precice(self): + def read_data_from_precice(self) -> list: """ Read data from preCICE. Depending on initial definition of whether a data is scalar or vector, the appropriate preCICE API command is called. Returns ------- - list : list + local_read_data : list List of dicts in which keys are names of data being read and the values are the data from preCICE. 
""" read_data = dict() @@ -258,15 +308,21 @@ def read_data_from_precice(self): for name, is_data_vector in self._read_data_names.items(): if is_data_vector: - read_data.update({name: self._interface.read_block_vector_data(self._read_data_ids[name], - self._mesh_vertex_ids)}) + read_data.update({name: self._interface.read_block_vector_data( + self._read_data_ids[name], self._mesh_vertex_ids)}) + if name in self._adaptivity_macro_data_names: + self._data_used_for_adaptivity[name] = read_data[name] else: - read_data.update({name: self._interface.read_block_scalar_data(self._read_data_ids[name], - self._mesh_vertex_ids)}) + read_data.update({name: self._interface.read_block_scalar_data( + self._read_data_ids[name], self._mesh_vertex_ids)}) + if name in self._adaptivity_macro_data_names: + self._data_used_for_adaptivity[name] = read_data[name] + + local_read_data = [dict(zip(read_data, t)) for t in zip(*read_data.values())] - return [dict(zip(read_data, t)) for t in zip(*read_data.values())] + return local_read_data - def write_data_to_precice(self, micro_sims_output): + def write_data_to_precice(self, micro_sims_output: list) -> None: """ Write output of micro simulations to preCICE. @@ -280,27 +336,30 @@ def write_data_to_precice(self, micro_sims_output): for name in micro_sims_output[0]: write_data[name] = [] - for dic in micro_sims_output: - for name, values in dic.items(): + for i in range(self._local_number_of_micro_sims): + for name, values in micro_sims_output[i].items(): write_data[name].append(values) for dname, is_data_vector in self._write_data_names.items(): if is_data_vector: - self._interface.write_block_vector_data(self._write_data_ids[dname], self._mesh_vertex_ids, - write_data[dname]) + self._interface.write_block_vector_data( + self._write_data_ids[dname], self._mesh_vertex_ids, write_data[dname]) else: - self._interface.write_block_scalar_data(self._write_data_ids[dname], self._mesh_vertex_ids, - write_data[dname]) + self._interface.write_block_scalar_data( + self._write_data_ids[dname], self._mesh_vertex_ids, write_data[dname]) else: for dname, is_data_vector in self._write_data_names.items(): if is_data_vector: - self._interface.write_block_vector_data(self._write_data_ids[dname], [], np.array([])) + self._interface.write_block_vector_data( + self._write_data_ids[dname], [], np.array([])) else: - self._interface.write_block_scalar_data(self._write_data_ids[dname], [], np.array([])) + self._interface.write_block_scalar_data( + self._write_data_ids[dname], [], np.array([])) - def solve_micro_simulations(self, micro_sims_input): + def solve_micro_simulations(self, micro_sims_input: dict, similarity_dists_nm1: np.ndarray, + micro_sim_states_nm1: np.ndarray): """ - Solve all micro simulations using the input data and assemble the micro simulations outputs in a list of dicts + Solve all micro simulations using the data read from preCICE and assemble the micro simulations outputs in a list of dicts format. Parameters @@ -315,16 +374,75 @@ def solve_micro_simulations(self, micro_sims_input): List of dicts in which keys are names of data and the values are the data of the output of the micro simulations. 
""" - micro_sims_output = [] - for i in range(self._number_of_micro_simulations): - self._logger.info("Solving micro simulation ({})".format(self._micro_sims[i].get_id())) + if self._is_adaptivity_on: + # Multiply old similarity distance by history term to get current distances + similarity_dists_n = exp(-self._hist_param * self._dt) * similarity_dists_nm1 + + for name, _ in self._adaptivity_data_names.items(): + similarity_dists_n = self._adaptivity_controller.get_similarity_dists( + self._dt, similarity_dists_n, self._data_used_for_adaptivity[name]) + + micro_sim_states_n = self._adaptivity_controller.update_active_micro_sims( + similarity_dists_n, micro_sim_states_nm1, self._micro_sims) + + micro_sim_states_n = self._adaptivity_controller.update_inactive_micro_sims( + similarity_dists_n, micro_sim_states_n, self._micro_sims) + + self._adaptivity_controller.associate_inactive_to_active( + similarity_dists_n, micro_sim_states_n, self._micro_sims) + + active_sim_ids = np.where(micro_sim_states_n == 1)[0] + inactive_sim_ids = np.where(micro_sim_states_n == 0)[0] + + else: + # If adaptivity is off, all micro simulations are active + active_sim_ids = np.where(micro_sim_states_nm1 == 1)[0] + inactive_sim_ids = np.where(micro_sim_states_nm1 == 0)[0] + + micro_sims_output = list(range(self._local_number_of_micro_sims)) + + # Solve all active micro simulations + for i in active_sim_ids: + self._logger.info("Solving active micro sim [{}]".format(self._micro_sims[i].get_global_id())) + start_time = time.time() - micro_sims_output.append(self._micro_sims[i].solve(micro_sims_input[i], self._dt)) + micro_sims_output[i] = self._micro_sims[i].solve(micro_sims_input[i], self._dt) end_time = time.time() + + if self._is_adaptivity_on: + # Mark the micro sim as active for export + micro_sims_output[i]["active_state"] = 1 + + for name in self._adaptivity_micro_data_names: + # Collect micro sim output for adaptivity + self._data_used_for_adaptivity[name][i] = micro_sims_output[i][name] + if self._is_micro_solve_time_required: micro_sims_output[i]["micro_sim_time"] = end_time - start_time - return micro_sims_output + # For each inactive simulation, copy data from most similar active simulation + for i in inactive_sim_ids: + self._logger.info( + "Micro sim [{}] is inactive. 
Copying data from most similar active micro " + "sim [{}]".format( + self._micro_sims[i].get_global_id(), + self._micro_sims[i].get_most_similar_active_id())) + + micro_sims_output[i] = dict() + for dname, values in micro_sims_output[self._micro_sims[i].get_most_similar_active_id()].items(): + micro_sims_output[i][dname] = values + + start_time = end_time = 0 + micro_sims_output[i]["active_state"] = 0 + + for name in self._adaptivity_micro_data_names: + # Collect micro sim output for adaptivity + self._data_used_for_adaptivity[name][i] = micro_sims_output[i][name] + + if self._is_micro_solve_time_required: + micro_sims_output[i]["micro_sim_time"] = end_time - start_time + + return micro_sims_output, similarity_dists_n, micro_sim_states_n def solve(self): """ @@ -332,6 +450,11 @@ def solve(self): """ t, n = 0, 0 t_checkpoint, n_checkpoint = 0, 0 + similarity_dists = np.zeros((self._local_number_of_micro_sims, self._local_number_of_micro_sims)) + micro_sim_states = np.zeros((self._local_number_of_micro_sims)) + + similarity_dists_cp = None + micro_sim_states_cp = None while self._interface.is_coupling_ongoing(): # Write checkpoints for all micro simulations @@ -340,11 +463,18 @@ def solve(self): micro_sim.save_checkpoint() t_checkpoint = t n_checkpoint = n - self._interface.mark_action_fulfilled(precice.action_write_iteration_checkpoint()) + + if self._is_adaptivity_on: + similarity_dists_cp = similarity_dists + micro_sim_states_cp = micro_sim_states + + self._interface.mark_action_fulfilled( + precice.action_write_iteration_checkpoint()) micro_sims_input = self.read_data_from_precice() - micro_sims_output = self.solve_micro_simulations(micro_sims_input) + micro_sims_output, similarity_dists, micro_sim_states = self.solve_micro_simulations( + micro_sims_input, similarity_dists, micro_sim_states) self.write_data_to_precice(micro_sims_output) @@ -359,10 +489,16 @@ def solve(self): micro_sim.reload_checkpoint() n = n_checkpoint t = t_checkpoint - self._interface.mark_action_fulfilled(precice.action_read_iteration_checkpoint()) + + if self._is_adaptivity_on: + similarity_dists = similarity_dists_cp + micro_sim_states = micro_sim_states_cp + + self._interface.mark_action_fulfilled( + precice.action_read_iteration_checkpoint()) else: # Time window has converged, now micro output can be generated self._logger.info("Micro simulations {} - {}: time window t = {} has converged".format( - self._micro_sims[0].get_id(), self._micro_sims[-1].get_id(), t)) + self._micro_sims[0].get_global_id(), self._micro_sims[-1].get_global_id(), t)) if self._micro_sims_have_output: if n % self._micro_n_out == 0: @@ -374,7 +510,10 @@ def solve(self): def main(): parser = argparse.ArgumentParser(description='.') - parser.add_argument('config_file', type=str, help='Path to the JSON config file of the manager.') + parser.add_argument( + 'config_file', + type=str, + help='Path to the JSON config file of the manager.') args = parser.parse_args() config_file_path = args.config_file diff --git a/tests/integration/test_adaptivity/clean-test.sh b/tests/integration/test_adaptivity/clean-test.sh new file mode 100755 index 00000000..866dffb5 --- /dev/null +++ b/tests/integration/test_adaptivity/clean-test.sh @@ -0,0 +1,9 @@ +rm -fv *-events-summary.json +rm -fv *-events.json +rm -fv *.log +rm -r -fv precice-run/ +rm -fv *.vtk +rm -fv *.out +rm -fv *.err +rm -fv output/*.vtu +rm -fv output/*.pvtu diff --git a/tests/integration/test_adaptivity/macro_solver.py b/tests/integration/test_adaptivity/macro_solver.py new file mode 100644 
index 00000000..f5649bd0 --- /dev/null +++ b/tests/integration/test_adaptivity/macro_solver.py @@ -0,0 +1,126 @@ +#! /usr/bin/env python3 +# + +import numpy as np +import precice + + +def main(): + """ + Dummy macro simulation which is coupled to a set of micro simulations via preCICE and the Micro Manager + """ + n = n_checkpoint = 0 + t = t_checkpoint = 0 + + # preCICE setup + interface = precice.Interface("macro-cube", "precice-config.xml", 0, 1) + + # define coupling meshes + read_mesh_name = write_mesh_name = "macro-cube-mesh" + read_mesh_id = interface.get_mesh_id(read_mesh_name) + read_data_names = {"micro-scalar-data": 0, "micro-vector-data": 1} + + write_mesh_id = interface.get_mesh_id(write_mesh_name) + write_data_names = {"macro-scalar-data": 0, "macro-vector-data": 1} + + # Coupling mesh - unit cube with 5 points in each direction + np_axis = 4 + x_coords, y_coords, z_coords = np.meshgrid( + np.linspace(0, 1, np_axis), + np.linspace(0, 1, np_axis), + np.linspace(0, 1, np_axis) + ) + + nv = np_axis ** interface.get_dimensions() + coords = np.zeros((nv, interface.get_dimensions())) + + write_scalar_data = np.zeros(nv) + write_vector_data = np.zeros((nv, interface.get_dimensions())) + + scalar_value = 1.0 + vector_value = [2.0, 3.0, 4.0] + for z in range(np_axis): + for y in range(np_axis): + for x in range(np_axis): + n = x + y * np_axis + z * np_axis * np_axis + coords[n, 0] = x_coords[x, y, z] + coords[n, 1] = y_coords[x, y, z] + coords[n, 2] = z_coords[x, y, z] + write_scalar_data[n] = scalar_value + write_vector_data[n, 0] = vector_value[0] + write_vector_data[n, 1] = vector_value[1] + write_vector_data[n, 2] = vector_value[2] + scalar_value += 1 + vector_value = [x + 1 for x in vector_value] + + # Define Gauss points on entire domain as coupling mesh + vertex_ids = interface.set_mesh_vertices(read_mesh_id, coords) + + read_data_ids = dict() + # coupling data + for name, dim in read_data_names.items(): + read_data_ids[name] = interface.get_data_id(name, read_mesh_id) + + write_data_ids = dict() + for name, dim in write_data_names.items(): + write_data_ids[name] = interface.get_data_id(name, write_mesh_id) + + # initialize preCICE + dt = interface.initialize() + + # Set initial data to write to preCICE + if interface.is_action_required(precice.action_write_initial_data()): + for name, dim in write_data_names.items(): + if dim == 0: + interface.write_block_scalar_data(write_data_ids[name], vertex_ids, write_scalar_data) + elif dim == 1: + interface.write_block_vector_data(write_data_ids[name], vertex_ids, write_vector_data) + interface.mark_action_fulfilled(precice.action_write_initial_data()) + + interface.initialize_data() + + # time loop + while interface.is_coupling_ongoing(): + # write checkpoint + if interface.is_action_required(precice.action_write_iteration_checkpoint()): + print("Saving macro state") + t_checkpoint = t + n_checkpoint = n + interface.mark_action_fulfilled(precice.action_write_iteration_checkpoint()) + + # Read data from preCICE + for name, dim in read_data_names.items(): + if dim == 0: + read_scalar_data = interface.read_block_scalar_data(read_data_ids[name], vertex_ids) + elif dim == 1: + read_vector_data = interface.read_block_vector_data(read_data_ids[name], vertex_ids) + + # Set the read data as the write data with an increment + write_scalar_data = read_scalar_data + 1 + write_vector_data = read_vector_data + 1 + + # Write data to preCICE + for name, dim in write_data_names.items(): + if dim == 0: + 
interface.write_block_scalar_data(write_data_ids[name], vertex_ids, write_scalar_data) + elif dim == 1: + interface.write_block_vector_data(write_data_ids[name], vertex_ids, write_vector_data) + + # do the coupling + dt = interface.advance(dt) + + # advance variables + n += 1 + t += dt + + if interface.is_action_required(precice.action_read_iteration_checkpoint()): + print("Reverting to old macro state") + t = t_checkpoint + n = n_checkpoint + interface.mark_action_fulfilled(precice.action_read_iteration_checkpoint()) + + interface.finalize() + + +if __name__ == '__main__': + main() diff --git a/tests/integration/test_adaptivity/micro-manager-config.json b/tests/integration/test_adaptivity/micro-manager-config.json new file mode 100644 index 00000000..6814af18 --- /dev/null +++ b/tests/integration/test_adaptivity/micro-manager-config.json @@ -0,0 +1,20 @@ +{ + "micro_file_name": "micro_solver", + "coupling_params": { + "config_file_name": "precice-config.xml", + "macro_mesh_name": "macro-cube-mesh", + "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, + "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} + }, + "simulation_params": { + "macro_domain_bounds": [0, 1, 0, 1, 0, 1], + "adaptivity": "True", + "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], + "adaptivity_history_param": 0.5, + "adaptivity_coarsening_constant": 0.3, + "adaptivity_refining_constant": 0.4 + }, + "diagnostics": { + "output_micro_sim_solve_time": "True" + } +} diff --git a/tests/integration/test_adaptivity/micro_solver.py b/tests/integration/test_adaptivity/micro_solver.py new file mode 100644 index 00000000..ad9640ab --- /dev/null +++ b/tests/integration/test_adaptivity/micro_solver.py @@ -0,0 +1,35 @@ +""" +Micro simulation +In this script we solve a dummy micro problem to just show the working of the macro-micro coupling +""" + + +class MicroSimulation: + + def __init__(self, sim_id): + """ + Constructor of MicroSimulation class. 
+ """ + self._sim_id = sim_id + self._micro_scalar_data = None + self._micro_vector_data = None + self._checkpoint = None + + def initialize(self): + self._micro_scalar_data = 0 + self._micro_vector_data = [] + self._checkpoint = 0 + + def solve(self, macro_data, dt): + assert dt != 0 + self._micro_vector_data = macro_data["macro-vector-data"] + 1 + self._micro_scalar_data = macro_data["macro-scalar-data"] + 1 + + return {"micro-scalar-data": self._micro_scalar_data, + "micro-vector-data": self._micro_vector_data} + + def save_checkpoint(self): + self._checkpoint = self._micro_scalar_data + + def reload_checkpoint(self): + self._micro_scalar_data = self._checkpoint diff --git a/tests/integration/test_adaptivity/precice-config.xml b/tests/integration/test_adaptivity/precice-config.xml new file mode 100644 index 00000000..b8905f50 --- /dev/null +++ b/tests/integration/test_adaptivity/precice-config.xml @@ -0,0 +1,59 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/integration/test_adaptivity/run_micro_manager.py b/tests/integration/test_adaptivity/run_micro_manager.py new file mode 100644 index 00000000..bfb69ab5 --- /dev/null +++ b/tests/integration/test_adaptivity/run_micro_manager.py @@ -0,0 +1,11 @@ +""" +Script to run the Micro Manager +""" + +from micro_manager import MicroManager + +manager = MicroManager("./micro-manager-config.json") + +manager.initialize() + +manager.solve() From 1d8753b239619e8b4c5fbbb96c62759364d1ed24 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 25 Jan 2023 17:57:52 +0100 Subject: [PATCH 02/87] Minor corrections in logging --- micro_manager/adaptivity.py | 2 +- micro_manager/micro_manager.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index 0f43f99d..d82d3579 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -144,7 +144,7 @@ def update_inactive_micro_sims( return _micro_sim_states - def associate_inactive_to_active(self, similarity_dists, micro_sim_states, micro_sims): + def associate_inactive_to_active(self, similarity_dists: np.ndarray, micro_sim_states: np.ndarray, micro_sims: list) -> None: """ Associate inactive micro simulations to most similar active micro simulation. diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index db64cb5a..ed876075 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -425,8 +425,7 @@ def solve_micro_simulations(self, micro_sims_input: dict, similarity_dists_nm1: self._logger.info( "Micro sim [{}] is inactive. 
Copying data from most similar active micro " "sim [{}]".format( - self._micro_sims[i].get_global_id(), - self._micro_sims[i].get_most_similar_active_id())) + self._micro_sims[i].get_global_id(), self._micro_sim_global_ids[self._micro_sims[i].get_most_similar_active_id()])) micro_sims_output[i] = dict() for dname, values in micro_sims_output[self._micro_sims[i].get_most_similar_active_id()].items(): From d1d67382e32c0e60a531f78bc4a82b29976e3b6c Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 25 Jan 2023 17:59:33 +0100 Subject: [PATCH 03/87] Formatting --- micro_manager/adaptivity.py | 6 +++++- micro_manager/micro_manager.py | 6 ++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index d82d3579..b0b53398 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -144,7 +144,11 @@ def update_inactive_micro_sims( return _micro_sim_states - def associate_inactive_to_active(self, similarity_dists: np.ndarray, micro_sim_states: np.ndarray, micro_sims: list) -> None: + def associate_inactive_to_active( + self, + similarity_dists: np.ndarray, + micro_sim_states: np.ndarray, + micro_sims: list) -> None: """ Associate inactive micro simulations to most similar active micro simulation. diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index ed876075..c579b190 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -422,10 +422,8 @@ def solve_micro_simulations(self, micro_sims_input: dict, similarity_dists_nm1: # For each inactive simulation, copy data from most similar active simulation for i in inactive_sim_ids: - self._logger.info( - "Micro sim [{}] is inactive. Copying data from most similar active micro " - "sim [{}]".format( - self._micro_sims[i].get_global_id(), self._micro_sim_global_ids[self._micro_sims[i].get_most_similar_active_id()])) + self._logger.info("Micro sim [{}] is inactive. Copying data from most similar active micro " "sim [{}]".format( + self._micro_sims[i].get_global_id(), self._micro_sim_global_ids[self._micro_sims[i].get_most_similar_active_id()])) micro_sims_output[i] = dict() for dname, values in micro_sims_output[self._micro_sims[i].get_most_similar_active_id()].items(): From cec8910d8cefbdb32465b520821e9a1069219414 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Thu, 16 Feb 2023 11:15:28 +0100 Subject: [PATCH 04/87] Get adaptivity to work on a per rank basis and add unit tests for the same (#21) * Refactoring adaptivity functions into smaller and more readable functions. Adding unit test framework * Add unit test for get_similarity_dists function * Add unit tests for all functions related to adaptivity * Changing apt-get command flag and formatting * Add option to control if adaptivity is computed in every implicit iteration * Formatting * Remove unnecessary input parameter to output() functionality of micro sim * Get active and inactive micro sims more efficiently * Adaptivity seems to be working. 
Several bugs have been resolved * Cleaning code * Add new output variable adaptivity steps to count how many time steps is a micro sim active in * Add docstrings * Fixing several bugs for use when adaptivity is off * Solving bug for when adaptivity is not used, and some formatting * Fix incorrect variable name in MicroProblem class functions * Move the step to make all sims inactive outside of the time loop (obviously) * Adding docstrings for internal functions of adaptivity * Formatting * Add README for macro-micro-dummy and modfiy adaptivity part of main README --- .github/workflows/run-adaptivity-test.yml | 36 +++- README.md | 1 + examples/macro-micro-dummy/README.md | 30 ++- micro_manager/adaptivity.py | 124 +++++++---- micro_manager/config.py | 26 ++- micro_manager/micro_manager.py | 196 +++++++++++------- .../test_adaptivity/macro_solver.py | 2 +- .../test_adaptivity/micro-manager-config.json | 3 +- .../test_adaptivity/precice-config.xml | 5 +- tests/unit/__init__.py | 0 tests/unit/test_adaptivity.py | 163 +++++++++++++++ tests/unit/test_adaptivity_config.json | 18 ++ 12 files changed, 481 insertions(+), 123 deletions(-) create mode 100644 tests/unit/__init__.py create mode 100644 tests/unit/test_adaptivity.py create mode 100644 tests/unit/test_adaptivity_config.json diff --git a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml index 2b012902..66dbdca0 100644 --- a/.github/workflows/run-adaptivity-test.yml +++ b/.github/workflows/run-adaptivity-test.yml @@ -1,4 +1,4 @@ -name: Run integration test for adaptivity +name: Run tests for adaptivity on: push: branches: @@ -7,22 +7,40 @@ on: branches: - "*" jobs: - run_test_adaptivity: - name: Test adaptivity + adaptivity_integration_test: + name: Run adaptivity integration test runs-on: ubuntu-latest container: precice/precice steps: - - name: Checkout Repository - uses: actions/checkout@v2 - - name: Install Dependencies + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install dependencies run: | apt-get -qq update apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config python -m pip install --upgrade pip - pip install setuptools wheel twine - - name: Install micro-manager + pip install setuptools wheel + - name: Install Micro Manager run: pip3 install --user . - - name: Run adaptivity test + - name: Run integration test run: | cd tests/integration/test_adaptivity/ python3 macro_solver.py & python3 run_micro_manager.py + + adaptivity_unit_tests: + name: Run adaptivity unit tests + runs-on: ubuntu-latest + container: precice/precice + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + - name: Install Dependencies + run: | + apt-get -qq update + apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config + python -m pip install --upgrade pip + pip install setuptools wheel + - name: Install Micro Manager + run: pip3 install --user . + - name: Run unit tests + run: python3 -m unittest diff --git a/README.md b/README.md index e0a14ec0..56477382 100644 --- a/README.md +++ b/README.md @@ -150,6 +150,7 @@ To turn on adaptivity, the following options need to be set in `simulation_param * `adaptivity_history_param`: History parameter $\Lambda$, set as $\Lambda >= 0$. * `adaptivity_coarsening_constant`: Coarsening constant $C_c$, set as $C_c < 1$. * `adaptivity_refining_constant`: Refining constant $C_r$, set as $C_r >= 0$. 
+* `adaptivity_every_implicit_iteration`: Set as `True` if adaptivity calculation is to be done in every implicit iteration. Setting `False` would lead to adaptivity being calculated once at the start of the time window and then reused in every implicit time iteration. All variables names are chosen to be same as the second publication mentioned above. diff --git a/examples/macro-micro-dummy/README.md b/examples/macro-micro-dummy/README.md index eb1ae458..e3cd3eb9 100644 --- a/examples/macro-micro-dummy/README.md +++ b/examples/macro-micro-dummy/README.md @@ -1 +1,29 @@ -... +# Install Dependencies + +* [preCICE](https://github.com/precice/precice) +* [pyprecice](https://github.com/precice/python-bindings) +* [Micro Manager](https://github.com/precice/micro-manager) + +# Run + +Run the dummy macro solver by running + +```bash +python3 macro_solver.py +``` + +Run the Micro Manager by running + +```bash +micro_manager micro-manager-config.json +``` + +or + +```bash +python3 run_micro_manager.py +``` + +# Next Steps + +If you want to couple any other solver against this dummy solver be sure to adjust the preCICE configuration (participant names, mesh names, data names etc.) to the needs of your solver, compare our [step-by-step guide for new adapters](https://github.com/precice/precice/wiki/Adapter-Example). diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index b0b53398..4ff3dfd2 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -10,9 +10,18 @@ def __init__(self, configurator) -> None: # Names of data to be used for adaptivity computation self._refine_const = configurator.get_adaptivity_refining_const() self._coarse_const = configurator.get_adaptivity_coarsening_const() - self._number_of_sims = None + self._number_of_sims = 0 + self._coarse_tol = 0.0 - def set_number_of_sims(self, number_of_sims) -> None: + def set_number_of_sims(self, number_of_sims: int) -> None: + """ + Setting number of simulations for the AdaptiveController object. + + Parameters + ---------- + number_of_sims : int + Number of micro simulations + """ self._number_of_sims = number_of_sims def get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: np.ndarray) -> np.ndarray: @@ -66,7 +75,8 @@ def update_active_micro_sims( micro_sim_states: np.ndarray, micro_sims: list) -> np.ndarray: """ - Update set of active micro simulations. Active micro simulations are compared to each other and if found similar, one of them is deactivated. + Update set of active micro simulations. Active micro simulations are compared to each other + and if found similar, one of them is deactivated. 
Parameters ---------- @@ -82,32 +92,53 @@ def update_active_micro_sims( _micro_sim_states : numpy array Updated 1D array having state (active or inactive) of each micro simulation """ - coarse_tol = self._coarse_const * self._refine_const * np.amax(similarity_dists) + self._coarse_tol = self._coarse_const * self._refine_const * np.amax(similarity_dists) - _micro_sim_states = np.copy(micro_sim_states) + _micro_sim_states = np.copy(micro_sim_states) # Input micro_sim_states is not longer used after this point # Update the set of active micro sims - for id_1 in range(self._number_of_sims): - if _micro_sim_states[id_1]: # if id_1 sim is active - for id_2 in range(self._number_of_sims): - if _micro_sim_states[id_2]: # if id_2 is active - if id_1 != id_2: # don't compare active sim to itself - # If active sim is similar to another active sim, - # deactivate it - if similarity_dists[id_1, id_2] < coarse_tol: - micro_sims[id_1].deactivate() - _micro_sim_states[id_1] = 0 - break + for i in range(self._number_of_sims): + if _micro_sim_states[i]: # if sim is active + if self._check_for_deactivation(i, similarity_dists, _micro_sim_states): + micro_sims[i].deactivate() + _micro_sim_states[i] = 0 return _micro_sim_states + def _check_for_deactivation( + self, + active_id: int, + similarity_dists: np.ndarray, + micro_sim_states: np.ndarray) -> bool: + """ + Function to check if an active simulation needs to be deactivated + + Parameters + ---------- + active_id : int + ID of active simulation which is checked for deactivation + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + micro_sim_states : numpy array + 1D array having state (active or inactive) of each micro simulation + """ + active_sim_ids = np.where(micro_sim_states == 1)[0] + + for active_id_2 in active_sim_ids: + if active_id != active_id_2: # don't compare active sim to itself + # If active sim is similar to another active sim, deactivate it + if similarity_dists[active_id, active_id_2] < self._coarse_tol: + return True + return False + def update_inactive_micro_sims( self, similarity_dists: np.ndarray, micro_sim_states: np.ndarray, micro_sims: list) -> np.ndarray: """ - Update set of inactive micro simulations. Each inactive micro simulation is compared to all active ones and if it is not similar to any of them, it is activated. + Update set of inactive micro simulations. Each inactive micro simulation is compared to all active ones + and if it is not similar to any of them, it is activated. 
Parameters ---------- @@ -123,27 +154,50 @@ def update_inactive_micro_sims( _micro_sim_states : numpy array Updated 1D array having state (active or inactive) of each micro simulation """ - ref_tol = self._refine_const * np.amax(similarity_dists) + self._ref_tol = self._refine_const * np.amax(similarity_dists) - _micro_sim_states = np.copy(micro_sim_states) + _micro_sim_states = np.copy(micro_sim_states) # Input micro_sim_states is not longer used after this point if not np.any(_micro_sim_states): + micro_sims[0].activate() _micro_sim_states[0] = 1 # If all sims are inactive, activate the first one (a random choice) # Update the set of inactive micro sims - for id_1 in range(self._number_of_sims): - dists = [] - if not _micro_sim_states[id_1]: # if id_1 is inactive - for id_2 in range(self._number_of_sims): - if _micro_sim_states[id_2]: # if id_2 is active - dists.append(similarity_dists[id_1, id_2]) - # If inactive sim is not similar to any active sim, activate it - if min(dists) > ref_tol: - micro_sims[id_1].activate() - _micro_sim_states[id_1] = 1 + for i in range(self._number_of_sims): + if not _micro_sim_states[i]: # if id is inactive + if self._check_for_activation(i, similarity_dists, _micro_sim_states): + micro_sims[i].activate() + _micro_sim_states[i] = 1 return _micro_sim_states + def _check_for_activation( + self, + inactive_id: int, + similarity_dists: np.ndarray, + micro_sim_states: np.ndarray) -> bool: + """ + Function to check if an inactive simulation needs to be activated + + Parameters + ---------- + inactive_id : int + ID of inactive simulation which is checked for activation + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + micro_sim_states : numpy array + 1D array having state (active or inactive) of each micro simulation + """ + dists = [] + + active_sim_ids = np.where(micro_sim_states == 1)[0] + + for active_id in active_sim_ids: + dists.append(similarity_dists[inactive_id, active_id]) + + # If inactive sim is not similar to any active sim, activate it + return min(dists) > self._ref_tol + def associate_inactive_to_active( self, similarity_dists: np.ndarray, @@ -165,11 +219,11 @@ def associate_inactive_to_active( inactive_sim_ids = np.where(micro_sim_states == 0)[0] # Associate inactive micro sims to active micro sims - for id_1 in inactive_sim_ids: + for inactive_id in inactive_sim_ids: dist_min = sys.float_info.max - for id_2 in active_sim_ids: + for active_id in active_sim_ids: # Find most similar active sim for every inactive sim - if similarity_dists[id_1, id_2] < dist_min: - micro_id = id_2 - dist_min = similarity_dists[id_1, id_2] - micro_sims[id_1].is_most_similar_to(micro_id) + if similarity_dists[inactive_id, active_id] < dist_min: + most_similar_active_id = active_id + dist_min = similarity_dists[inactive_id, active_id] + micro_sims[inactive_id].is_most_similar_to(most_similar_active_id) diff --git a/micro_manager/config.py b/micro_manager/config.py index 13a6e5a8..2b431602 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -40,6 +40,7 @@ def __init__(self, config_filename): self._adaptivity_history_param = 0.5 self._adaptivity_coarsening_constant = 0.5 self._adaptivity_refining_constant = 0.5 + self._adaptivity_every_implicit_iteration = False self.read_json(config_filename) @@ -104,7 +105,12 @@ def read_json(self, config_filename): "in every time window.") try: - self._adaptivity = data["simulation_params"]["adaptivity"] + adaptivity = data["simulation_params"]["adaptivity"] + 
+ if adaptivity == "True": + self._adaptivity = True + elif adaptivity == "False": + self._adaptivity = False exchange_data = {**self._read_data_names, **self._write_data_names} for dname in data["simulation_params"]["adaptivity_data"]: @@ -113,8 +119,18 @@ def read_json(self, config_filename): self._adaptivity_history_param = data["simulation_params"]["adaptivity_history_param"] self._adaptivity_coarsening_constant = data["simulation_params"]["adaptivity_coarsening_constant"] self._adaptivity_refining_constant = data["simulation_params"]["adaptivity_refining_constant"] + adaptivity_every_implicit_iteration = data["simulation_params"]["adaptivity_every_implicit_iteration"] + + if adaptivity_every_implicit_iteration == "True": + self._adaptivity_every_implicit_iteration = True + elif adaptivity_every_implicit_iteration == "False": + self._adaptivity_every_implicit_iteration = False + + if not self._adaptivity_every_implicit_iteration: + print("Micro Manager will compute adaptivity once at the start of every time window") self._write_data_names["active_state"] = False + self._write_data_names["active_steps"] = False except BaseException: print("Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations " "in all time steps.") @@ -279,3 +295,11 @@ def get_adaptivity_refining_const(self): """ return self._adaptivity_refining_constant + + def is_adaptivity_required_in_every_implicit_iteration(self): + """ + + Returns + ------- + """ + return self._adaptivity_every_implicit_iteration diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index c579b190..429e0067 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -39,7 +39,7 @@ def __init__(self, local_id, global_id): self._local_id = local_id self._global_id = global_id self._is_active = False - self._most_similar_active_local_id = 0 + self._most_similar_active_local_id = None def get_local_id(self): return self._local_id @@ -54,12 +54,12 @@ def deactivate(self): self._is_active = False def is_most_similar_to(self, similar_active_local_id): - assert self._is_active is False, "Micro simulation {} is active and hence cannot be most similar to another active simulation".format( + assert not self._is_active, "Micro simulation {} is active and hence cannot be most similar to another active simulation".format( self._global_id) - self._most_similar_active_id = similar_active_local_id + self._most_similar_active_local_id = similar_active_local_id def get_most_similar_active_id(self): - assert self._is_active is False, "Micro simulation {} is active and hence cannot have a most similar active id".format( + assert not self._is_active, "Micro simulation {} is active and hence cannot have a most similar active id".format( self._global_id) return self._most_similar_active_local_id @@ -70,7 +70,7 @@ def is_active(self): class MicroManager: - def __init__(self, config_file) -> None: + def __init__(self, config_file: str) -> None: """ Constructor of MicroManager class. 
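Taken together, the distance update and the activation/deactivation checks added to `adaptivity.py` above, combined with the history weighting applied in `compute_adaptivity` further below, amount roughly to the following (a sketch, not taken verbatim from the code; $\Lambda$ is `adaptivity_history_param`, $C_c$ and $C_r$ are the coarsening and refining constants from the configuration):

```latex
% Similarity distance between micro simulations i and j, updated once per time
% window by summing the 1-norm differences of every data set q selected for adaptivity:
d_{ij}^{n} = e^{-\Lambda \, \Delta t}\, d_{ij}^{n-1} + \Delta t \sum_{q} \lVert q_i - q_j \rVert_1

% Deactivate an active simulation i if another active simulation j is similar to it:
\exists\, j \neq i \ \text{active}: \quad d_{ij}^{n} < C_c \, C_r \, \max_{k,l} d_{kl}^{n}

% Activate an inactive simulation i if it is not similar to any active simulation:
\min_{j\ \text{active}} d_{ij}^{n} > C_r \, \max_{k,l} d_{kl}^{n}
```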
@@ -151,7 +151,10 @@ def __init__(self, config_file) -> None: if name in self._write_data_names: self._adaptivity_micro_data_names[name] = is_data_vector - def decompose_macro_domain(self, macro_bounds) -> list: + self._is_adaptivity_required_in_every_implicit_iteration = config.is_adaptivity_required_in_every_implicit_iteration() + self._micro_sims_active_steps = None + + def decompose_macro_domain(self, macro_bounds: list) -> list: """ Decompose the macro domain equally among all ranks, if the Micro Manager is run in parallel. @@ -219,12 +222,15 @@ def initialize(self) -> None: self._local_number_of_micro_sims, _ = mesh_vertex_coords.shape self._logger.info("Number of local micro simulations = {}".format(self._local_number_of_micro_sims)) - for name, is_data_vector in self._adaptivity_data_names.items(): - if is_data_vector: - self._data_used_for_adaptivity[name] = np.zeros( - (self._local_number_of_micro_sims, self._interface.get_dimensions())) - else: - self._data_used_for_adaptivity[name] = np.zeros((self._local_number_of_micro_sims)) + if self._is_adaptivity_on: + for name, is_data_vector in self._adaptivity_data_names.items(): + if is_data_vector: + self._data_used_for_adaptivity[name] = np.zeros( + (self._local_number_of_micro_sims, self._interface.get_dimensions())) + else: + self._data_used_for_adaptivity[name] = np.zeros((self._local_number_of_micro_sims)) + + self._adaptivity_controller.set_number_of_sims(self._local_number_of_micro_sims) if self._local_number_of_micro_sims == 0: if self._is_parallel: @@ -244,8 +250,6 @@ def initialize(self) -> None: # Get global number of micro simulations self._global_number_of_micro_sims = np.sum(nms_all_ranks) - self._adaptivity_controller.set_number_of_sims(self._local_number_of_micro_sims) - # Create all micro simulations sim_id = 0 if self._rank != 0: @@ -260,6 +264,7 @@ def initialize(self) -> None: sim_id += 1 micro_sims_output = list(range(self._local_number_of_micro_sims)) + self._micro_sims_active_steps = np.zeros(self._local_number_of_micro_sims) # Initialize micro simulations if initialize() method exists if hasattr(self._micro_problem, 'initialize') and callable(getattr(self._micro_problem, 'initialize')): @@ -270,6 +275,7 @@ def initialize(self) -> None: micro_sims_output[i]["micro_sim_time"] = 0.0 if self._is_adaptivity_on: micro_sims_output[i]["active_state"] = 0 + micro_sims_output[i]["active_steps"] = 0 else: micro_sims_output[i] = dict() for name, is_data_vector in self._write_data_names.items(): @@ -310,11 +316,11 @@ def read_data_from_precice(self) -> list: if is_data_vector: read_data.update({name: self._interface.read_block_vector_data( self._read_data_ids[name], self._mesh_vertex_ids)}) - if name in self._adaptivity_macro_data_names: - self._data_used_for_adaptivity[name] = read_data[name] else: read_data.update({name: self._interface.read_block_scalar_data( self._read_data_ids[name], self._mesh_vertex_ids)}) + + if self._is_adaptivity_on: if name in self._adaptivity_macro_data_names: self._data_used_for_adaptivity[name] = read_data[name] @@ -336,8 +342,8 @@ def write_data_to_precice(self, micro_sims_output: list) -> None: for name in micro_sims_output[0]: write_data[name] = [] - for i in range(self._local_number_of_micro_sims): - for name, values in micro_sims_output[i].items(): + for output_dict in micro_sims_output: + for name, values in output_dict.items(): write_data[name].append(values) for dname, is_data_vector in self._write_data_names.items(): @@ -356,8 +362,46 @@ def write_data_to_precice(self, 
micro_sims_output: list) -> None: self._interface.write_block_scalar_data( self._write_data_ids[dname], [], np.array([])) - def solve_micro_simulations(self, micro_sims_input: dict, similarity_dists_nm1: np.ndarray, - micro_sim_states_nm1: np.ndarray): + def compute_adaptivity(self, similarity_dists_nm1: np.ndarray, micro_sim_states_nm1: np.ndarray): + """ + Compute adaptivity based on similartiy distances and micro simulation states from t_{n-1} + + Parameters + ---------- + + similarity_dists_nm1 : numpy array + 2D array having similarity distances between each micro simulation pair at t_{n-1} + micro_sim_states_nm1 : numpy array + 1D array having state (active or inactive) of each micro simulation at t_{n-1} + + Results + ------- + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair at t_{n} + micro_sim_states : numpy array + 1D array having state (active or inactive) of each micro simulation at t_{n} + """ + # Multiply old similarity distance by history term to get current distances + similarity_dists_n = exp(-self._hist_param * self._dt) * similarity_dists_nm1 + + for name, _ in self._adaptivity_data_names.items(): + similarity_dists_n = self._adaptivity_controller.get_similarity_dists( + self._dt, similarity_dists_n, self._data_used_for_adaptivity[name]) + + micro_sim_states_n = self._adaptivity_controller.update_active_micro_sims( + similarity_dists_n, micro_sim_states_nm1, self._micro_sims) + + micro_sim_states_n = self._adaptivity_controller.update_inactive_micro_sims( + similarity_dists_n, micro_sim_states_n, self._micro_sims) + + self._adaptivity_controller.associate_inactive_to_active( + similarity_dists_n, micro_sim_states_n, self._micro_sims) + + assert np.any(micro_sim_states_n), "There are no active simulations, which is not possible." + + return similarity_dists_n, micro_sim_states_n + + def solve_micro_simulations(self, micro_sims_input: list, micro_sim_states: np.ndarray) -> list: """ Solve all micro simulations using the data read from preCICE and assemble the micro simulations outputs in a list of dicts format. @@ -367,6 +411,8 @@ def solve_micro_simulations(self, micro_sims_input: dict, similarity_dists_nm1: micro_sims_input : list List of dicts in which keys are names of data and the values are the data which are required inputs to solve a micro simulation. + micro_sim_states : numpy array + 1D array having state (active or inactive) of each micro simulation Returns ------- @@ -374,72 +420,52 @@ def solve_micro_simulations(self, micro_sims_input: dict, similarity_dists_nm1: List of dicts in which keys are names of data and the values are the data of the output of the micro simulations. 
""" - if self._is_adaptivity_on: - # Multiply old similarity distance by history term to get current distances - similarity_dists_n = exp(-self._hist_param * self._dt) * similarity_dists_nm1 - - for name, _ in self._adaptivity_data_names.items(): - similarity_dists_n = self._adaptivity_controller.get_similarity_dists( - self._dt, similarity_dists_n, self._data_used_for_adaptivity[name]) - - micro_sim_states_n = self._adaptivity_controller.update_active_micro_sims( - similarity_dists_n, micro_sim_states_nm1, self._micro_sims) - - micro_sim_states_n = self._adaptivity_controller.update_inactive_micro_sims( - similarity_dists_n, micro_sim_states_n, self._micro_sims) - - self._adaptivity_controller.associate_inactive_to_active( - similarity_dists_n, micro_sim_states_n, self._micro_sims) - - active_sim_ids = np.where(micro_sim_states_n == 1)[0] - inactive_sim_ids = np.where(micro_sim_states_n == 0)[0] - - else: - # If adaptivity is off, all micro simulations are active - active_sim_ids = np.where(micro_sim_states_nm1 == 1)[0] - inactive_sim_ids = np.where(micro_sim_states_nm1 == 0)[0] + active_sim_ids = np.where(micro_sim_states == 1)[0] + inactive_sim_ids = np.where(micro_sim_states == 0)[0] micro_sims_output = list(range(self._local_number_of_micro_sims)) # Solve all active micro simulations - for i in active_sim_ids: - self._logger.info("Solving active micro sim [{}]".format(self._micro_sims[i].get_global_id())) + for active_id in active_sim_ids: + self._logger.info("Solving active micro sim [{}]".format(self._micro_sims[active_id].get_global_id())) start_time = time.time() - micro_sims_output[i] = self._micro_sims[i].solve(micro_sims_input[i], self._dt) + micro_sims_output[active_id] = self._micro_sims[active_id].solve(micro_sims_input[active_id], self._dt) end_time = time.time() if self._is_adaptivity_on: # Mark the micro sim as active for export - micro_sims_output[i]["active_state"] = 1 + micro_sims_output[active_id]["active_state"] = 1 + micro_sims_output[active_id]["active_steps"] = self._micro_sims_active_steps[active_id] - for name in self._adaptivity_micro_data_names: - # Collect micro sim output for adaptivity - self._data_used_for_adaptivity[name][i] = micro_sims_output[i][name] + for name in self._adaptivity_micro_data_names: + # Collect micro sim output for adaptivity + self._data_used_for_adaptivity[name][active_id] = micro_sims_output[active_id][name] if self._is_micro_solve_time_required: - micro_sims_output[i]["micro_sim_time"] = end_time - start_time + micro_sims_output[active_id]["micro_sim_time"] = end_time - start_time # For each inactive simulation, copy data from most similar active simulation - for i in inactive_sim_ids: - self._logger.info("Micro sim [{}] is inactive. Copying data from most similar active micro " "sim [{}]".format( - self._micro_sims[i].get_global_id(), self._micro_sim_global_ids[self._micro_sims[i].get_most_similar_active_id()])) + for inactive_id in inactive_sim_ids: + self._logger.info("Micro sim [{}] is inactive. 
Copying data from most similar active micro sim [{}]".format( + self._micro_sims[inactive_id].get_global_id(), self._micro_sim_global_ids[self._micro_sims[inactive_id].get_most_similar_active_id()])) - micro_sims_output[i] = dict() - for dname, values in micro_sims_output[self._micro_sims[i].get_most_similar_active_id()].items(): - micro_sims_output[i][dname] = values + micro_sims_output[inactive_id] = dict() + for dname, values in micro_sims_output[self._micro_sims[inactive_id].get_most_similar_active_id()].items(): + micro_sims_output[inactive_id][dname] = values - start_time = end_time = 0 - micro_sims_output[i]["active_state"] = 0 + if self._is_adaptivity_on: + for name in self._adaptivity_micro_data_names: + # Collect micro sim output for adaptivity + self._data_used_for_adaptivity[name][inactive_id] = micro_sims_output[inactive_id][name] - for name in self._adaptivity_micro_data_names: - # Collect micro sim output for adaptivity - self._data_used_for_adaptivity[name][i] = micro_sims_output[i][name] + micro_sims_output[inactive_id]["active_state"] = 0 + micro_sims_output[inactive_id]["active_steps"] = self._micro_sims_active_steps[inactive_id] if self._is_micro_solve_time_required: - micro_sims_output[i]["micro_sim_time"] = end_time - start_time + micro_sims_output[inactive_id]["micro_sim_time"] = 0 - return micro_sims_output, similarity_dists_n, micro_sim_states_n + return micro_sims_output def solve(self): """ @@ -448,13 +474,17 @@ def solve(self): t, n = 0, 0 t_checkpoint, n_checkpoint = 0, 0 similarity_dists = np.zeros((self._local_number_of_micro_sims, self._local_number_of_micro_sims)) - micro_sim_states = np.zeros((self._local_number_of_micro_sims)) + micro_sim_states = np.ones((self._local_number_of_micro_sims)) # Start with all active simulations + + if self._is_adaptivity_on: + # Start adaptivity calculation with all sims inactive + micro_sim_states = np.zeros((self._local_number_of_micro_sims)) similarity_dists_cp = None micro_sim_states_cp = None + micro_sims_cp = None while self._interface.is_coupling_ongoing(): - # Write checkpoints for all micro simulations if self._interface.is_action_required(precice.action_write_iteration_checkpoint()): for micro_sim in self._micro_sims: micro_sim.save_checkpoint() @@ -462,16 +492,32 @@ def solve(self): n_checkpoint = n if self._is_adaptivity_on: - similarity_dists_cp = similarity_dists - micro_sim_states_cp = micro_sim_states + if not self._is_adaptivity_required_in_every_implicit_iteration: + similarity_dists, micro_sim_states = self.compute_adaptivity(similarity_dists, micro_sim_states) + + # Only do checkpointing if adaptivity is computed once in every time window + similarity_dists_cp = np.copy(similarity_dists) + micro_sim_states_cp = np.copy(micro_sim_states) + micro_sims_cp = self._micro_sims.copy() + + active_sim_ids = np.where(micro_sim_states == 1)[0] + for active_id in active_sim_ids: + self._micro_sims_active_steps[active_id] += 1 self._interface.mark_action_fulfilled( precice.action_write_iteration_checkpoint()) micro_sims_input = self.read_data_from_precice() - micro_sims_output, similarity_dists, micro_sim_states = self.solve_micro_simulations( - micro_sims_input, similarity_dists, micro_sim_states) + if self._is_adaptivity_on: + if self._is_adaptivity_required_in_every_implicit_iteration: + similarity_dists, micro_sim_states = self.compute_adaptivity(similarity_dists, micro_sim_states) + + active_sim_ids = np.where(micro_sim_states == 1)[0] + for active_id in active_sim_ids: + self._micro_sims_active_steps[active_id] 
+= 1 + + micro_sims_output = self.solve_micro_simulations(micro_sims_input, micro_sim_states) self.write_data_to_precice(micro_sims_output) @@ -488,8 +534,10 @@ def solve(self): t = t_checkpoint if self._is_adaptivity_on: - similarity_dists = similarity_dists_cp - micro_sim_states = micro_sim_states_cp + if not self._is_adaptivity_required_in_every_implicit_iteration: + similarity_dists = np.copy(similarity_dists_cp) + micro_sim_states = np.copy(micro_sim_states_cp) + self._micro_sims = micro_sims_cp.copy() self._interface.mark_action_fulfilled( precice.action_read_iteration_checkpoint()) @@ -500,7 +548,7 @@ def solve(self): if self._micro_sims_have_output: if n % self._micro_n_out == 0: for micro_sim in self._micro_sims: - micro_sim.output(n) + micro_sim.output() self._interface.finalize() diff --git a/tests/integration/test_adaptivity/macro_solver.py b/tests/integration/test_adaptivity/macro_solver.py index f5649bd0..5d607c06 100644 --- a/tests/integration/test_adaptivity/macro_solver.py +++ b/tests/integration/test_adaptivity/macro_solver.py @@ -24,7 +24,7 @@ def main(): write_data_names = {"macro-scalar-data": 0, "macro-vector-data": 1} # Coupling mesh - unit cube with 5 points in each direction - np_axis = 4 + np_axis = 5 x_coords, y_coords, z_coords = np.meshgrid( np.linspace(0, 1, np_axis), np.linspace(0, 1, np_axis), diff --git a/tests/integration/test_adaptivity/micro-manager-config.json b/tests/integration/test_adaptivity/micro-manager-config.json index 6814af18..c2e86fb5 100644 --- a/tests/integration/test_adaptivity/micro-manager-config.json +++ b/tests/integration/test_adaptivity/micro-manager-config.json @@ -12,7 +12,8 @@ "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], "adaptivity_history_param": 0.5, "adaptivity_coarsening_constant": 0.3, - "adaptivity_refining_constant": 0.4 + "adaptivity_refining_constant": 0.4, + "adaptivity_every_implicit_iteration": "True" }, "diagnostics": { "output_micro_sim_solve_time": "True" diff --git a/tests/integration/test_adaptivity/precice-config.xml b/tests/integration/test_adaptivity/precice-config.xml index b8905f50..aad23097 100644 --- a/tests/integration/test_adaptivity/precice-config.xml +++ b/tests/integration/test_adaptivity/precice-config.xml @@ -14,6 +14,7 @@ + @@ -22,6 +23,7 @@ + @@ -40,6 +42,7 @@ + @@ -47,7 +50,7 @@ - + diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/test_adaptivity.py b/tests/unit/test_adaptivity.py new file mode 100644 index 00000000..6d4c306d --- /dev/null +++ b/tests/unit/test_adaptivity.py @@ -0,0 +1,163 @@ +from unittest import TestCase +from micro_manager.adaptivity import AdaptiveController +from micro_manager.config import Config +import numpy as np + + +class TestAdaptivity(TestCase): + + def setUp(self): + self._adaptivity_controller = AdaptiveController(Config("./tests/unit/test_adaptivity_config.json")) + self._number_of_sims = 5 + self._dt = 0.1 + self._dim = 3 + + self._micro_scalar_data = np.zeros(5) + np.put(self._micro_scalar_data, [0, 1, 2], [3.0, 3.0, 3.0]) + np.put(self._micro_scalar_data, [3, 4], [5.0, 5.0]) + + self._micro_vector_data = np.zeros((5, 3)) + # First three simulations have similar micro_vector_data + for i in range(3): + self._micro_vector_data[i, :] = 5.0 + + # Last two simulations have similar micro_vector_data + for i in range(3, 5): + self._micro_vector_data[i, :] = 10.0 + + self._macro_scalar_data = np.zeros(5) + np.put(self._micro_scalar_data, [0, 1, 2], [130.0, 130.0, 130.0]) 
+ np.put(self._micro_scalar_data, [3, 4], [250.0, 250.0]) + + self._macro_vector_data = np.zeros((5, 3)) + # First three simulations have similar micro_vector_data + for i in range(3): + self._macro_vector_data[i, :] = 100.0 + + # Last two simulations have similar micro_vector_data + for i in range(3, 5): + self._macro_vector_data[i, :] = 300.0 + + # Adaptivity constants + self._refine_const = 0.5 + self._coarse_const = 0.5 + self._coarse_tol = 0.2 + + def test_set_number_of_sims(self): + self._adaptivity_controller.set_number_of_sims(self._number_of_sims) + self.assertEqual(self._number_of_sims, self._adaptivity_controller._number_of_sims) + + def test_get_similarity_dists(self): + self._adaptivity_controller._number_of_sims = self._number_of_sims + expected_similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) + for i in range(self._number_of_sims): + for j in range(self._number_of_sims): + similarity_dist = abs(self._micro_scalar_data[i] - self._micro_scalar_data[j]) + similarity_dist += abs(self._macro_scalar_data[i] - self._macro_scalar_data[j]) + for d in range(self._dim): + similarity_dist += abs(self._micro_vector_data[i, d] - self._micro_vector_data[j, d]) + similarity_dist += abs(self._macro_vector_data[i, d] - self._macro_vector_data[j, d]) + expected_similarity_dists[i, j] = self._dt * similarity_dist + + actual_similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) + actual_similarity_dists = self._adaptivity_controller.get_similarity_dists( + self._dt, actual_similarity_dists, self._micro_scalar_data) + actual_similarity_dists = self._adaptivity_controller.get_similarity_dists( + self._dt, actual_similarity_dists, self._micro_vector_data) + actual_similarity_dists = self._adaptivity_controller.get_similarity_dists( + self._dt, actual_similarity_dists, self._macro_scalar_data) + actual_similarity_dists = self._adaptivity_controller.get_similarity_dists( + self._dt, actual_similarity_dists, self._macro_vector_data) + + self.assertTrue(np.array_equal(expected_similarity_dists, actual_similarity_dists)) + + def test_update_active_micro_sims(self): + self._adaptivity_controller._number_of_sims = self._number_of_sims + # Third and fifth micro sim are active, rest are deactivate + expected_micro_sim_states = np.array([0, 0, 1, 0, 1]) + + similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) + for i in range(self._number_of_sims): + for j in range(self._number_of_sims): + similarity_dist = abs(self._micro_scalar_data[i] - self._micro_scalar_data[j]) + similarity_dist += abs(self._macro_scalar_data[i] - self._macro_scalar_data[j]) + for d in range(self._dim): + similarity_dist += abs(self._micro_vector_data[i, d] - self._micro_vector_data[j, d]) + similarity_dist += abs(self._macro_vector_data[i, d] - self._macro_vector_data[j, d]) + similarity_dists[i, j] = self._dt * similarity_dist + + actual_micro_sim_states = np.array([1, 1, 1, 1, 1]) # Activate all micro sims before calling functionality + + class MicroSimulation(): + def deactivate(self): + pass + + dummy_micro_sims = [] + for i in range(self._number_of_sims): + dummy_micro_sims.append(MicroSimulation()) + + actual_micro_sim_states = self._adaptivity_controller.update_active_micro_sims( + similarity_dists, actual_micro_sim_states, dummy_micro_sims) + + self.assertTrue(np.array_equal(expected_micro_sim_states, actual_micro_sim_states)) + + def test_update_inactive_micro_sims(self): + self._adaptivity_controller._number_of_sims = self._number_of_sims + # Third and fifth 
micro sim are active, rest are deactivate + expected_micro_sim_states = np.array([0, 1, 0, 1, 0]) + + similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) + for i in range(self._number_of_sims): + for j in range(self._number_of_sims): + similarity_dist = abs(self._micro_scalar_data[i] - self._micro_scalar_data[j]) + similarity_dist += abs(self._macro_scalar_data[i] - self._macro_scalar_data[j]) + for d in range(self._dim): + similarity_dist += abs(self._micro_vector_data[i, d] - self._micro_vector_data[j, d]) + similarity_dist += abs(self._macro_vector_data[i, d] - self._macro_vector_data[j, d]) + similarity_dists[i, j] = self._dt * similarity_dist + + actual_micro_sim_states = np.array([0, 1, 0, 0, 0]) # Activate all micro sims before calling functionality + + class MicroSimulation(): + def activate(self): + pass + + dummy_micro_sims = [] + for i in range(self._number_of_sims): + dummy_micro_sims.append(MicroSimulation()) + + actual_micro_sim_states = self._adaptivity_controller.update_inactive_micro_sims( + similarity_dists, actual_micro_sim_states, dummy_micro_sims) + + self.assertTrue(np.array_equal(expected_micro_sim_states, actual_micro_sim_states)) + + def test_associate_active_to_inactive(self): + self._adaptivity_controller._number_of_sims = self._number_of_sims + micro_sim_states = np.array([0, 0, 1, 0, 1]) + + similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) + for i in range(self._number_of_sims): + for j in range(self._number_of_sims): + similarity_dist = abs(self._micro_scalar_data[i] - self._micro_scalar_data[j]) + similarity_dist += abs(self._macro_scalar_data[i] - self._macro_scalar_data[j]) + for d in range(self._dim): + similarity_dist += abs(self._micro_vector_data[i, d] - self._micro_vector_data[j, d]) + similarity_dist += abs(self._macro_vector_data[i, d] - self._macro_vector_data[j, d]) + similarity_dists[i, j] = self._dt * similarity_dist + + class MicroSimulation(): + def is_most_similar_to(self, similar_active_id): + self._most_similar_active_id = similar_active_id + + def get_most_similar_active_id(self): + return self._most_similar_active_id + + dummy_micro_sims = [] + for i in range(self._number_of_sims): + dummy_micro_sims.append(MicroSimulation()) + + self._adaptivity_controller.associate_inactive_to_active(similarity_dists, micro_sim_states, dummy_micro_sims) + + self.assertEqual(dummy_micro_sims[0].get_most_similar_active_id(), 2) + self.assertEqual(dummy_micro_sims[1].get_most_similar_active_id(), 2) + self.assertEqual(dummy_micro_sims[3].get_most_similar_active_id(), 4) diff --git a/tests/unit/test_adaptivity_config.json b/tests/unit/test_adaptivity_config.json new file mode 100644 index 00000000..00755b63 --- /dev/null +++ b/tests/unit/test_adaptivity_config.json @@ -0,0 +1,18 @@ +{ + "micro_file_name": "dummy", + "coupling_params": { + "config_file_name": "dummy", + "macro_mesh_name": "dummy", + "read_data_names": {}, + "write_data_names": {} + }, + "simulation_params": { + "macro_domain_bounds": [], + "adaptivity": "True", + "adaptivity_data": [], + "adaptivity_history_param": 0.5, + "adaptivity_coarsening_constant": 0.3, + "adaptivity_refining_constant": 0.4, + "adaptivity_every_implicit_iteration": "False" + } +} From 24932e745ed3ca5c814a022ce370c97cc695d963 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 24 Feb 2023 16:18:38 +0100 Subject: [PATCH 05/87] Remove logging of each active and inactive micro sim id, and instead log only number of active and inactive sims --- micro_manager/micro_manager.py 
| 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 429e0067..ebf34dc6 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -399,6 +399,9 @@ def compute_adaptivity(self, similarity_dists_nm1: np.ndarray, micro_sim_states_ assert np.any(micro_sim_states_n), "There are no active simulations, which is not possible." + self._logger.info("Number of active micro simulations = {}".format(np.count_nonzero(micro_sim_states_n == 1))) + self._logger.info("Number of inactive micro simulations = {}".format(np.count_nonzero(micro_sim_states_n == 0))) + return similarity_dists_n, micro_sim_states_n def solve_micro_simulations(self, micro_sims_input: list, micro_sim_states: np.ndarray) -> list: @@ -427,7 +430,7 @@ def solve_micro_simulations(self, micro_sims_input: list, micro_sim_states: np.n # Solve all active micro simulations for active_id in active_sim_ids: - self._logger.info("Solving active micro sim [{}]".format(self._micro_sims[active_id].get_global_id())) + # self._logger.info("Solving active micro sim [{}]".format(self._micro_sims[active_id].get_global_id())) start_time = time.time() micro_sims_output[active_id] = self._micro_sims[active_id].solve(micro_sims_input[active_id], self._dt) @@ -447,8 +450,8 @@ def solve_micro_simulations(self, micro_sims_input: list, micro_sim_states: np.n # For each inactive simulation, copy data from most similar active simulation for inactive_id in inactive_sim_ids: - self._logger.info("Micro sim [{}] is inactive. Copying data from most similar active micro sim [{}]".format( - self._micro_sims[inactive_id].get_global_id(), self._micro_sim_global_ids[self._micro_sims[inactive_id].get_most_similar_active_id()])) + # self._logger.info("Micro sim [{}] is inactive. Copying data from most similar active micro sim [{}]".format( + # self._micro_sims[inactive_id].get_global_id(), self._micro_sim_global_ids[self._micro_sims[inactive_id].get_most_similar_active_id()])) micro_sims_output[inactive_id] = dict() for dname, values in micro_sims_output[self._micro_sims[inactive_id].get_most_similar_active_id()].items(): From 8d3b1ccba97c5a05094e5623f7ed8381bdc7d0b8 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 24 Feb 2023 16:22:09 +0100 Subject: [PATCH 06/87] Formatting --- micro_manager/micro_manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index ebf34dc6..4cc72079 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -451,7 +451,8 @@ def solve_micro_simulations(self, micro_sims_input: list, micro_sim_states: np.n # For each inactive simulation, copy data from most similar active simulation for inactive_id in inactive_sim_ids: # self._logger.info("Micro sim [{}] is inactive. 
Copying data from most similar active micro sim [{}]".format( - # self._micro_sims[inactive_id].get_global_id(), self._micro_sim_global_ids[self._micro_sims[inactive_id].get_most_similar_active_id()])) + # self._micro_sims[inactive_id].get_global_id(), + # self._micro_sim_global_ids[self._micro_sims[inactive_id].get_most_similar_active_id()])) micro_sims_output[inactive_id] = dict() for dname, values in micro_sims_output[self._micro_sims[inactive_id].get_most_similar_active_id()].items(): From 41f936864c48370d39726bf8d2144c50c2cf973d Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Mon, 6 Mar 2023 17:00:51 +0100 Subject: [PATCH 07/87] Simplify solver dummy configuration to avoid unexpected behavior --- examples/macro-micro-dummy/macro_dummy.py | 1 - examples/macro-micro-dummy/precice-config.xml | 25 +++++-------------- 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/examples/macro-micro-dummy/macro_dummy.py b/examples/macro-micro-dummy/macro_dummy.py index 765ff7e3..fbc5b03d 100644 --- a/examples/macro-micro-dummy/macro_dummy.py +++ b/examples/macro-micro-dummy/macro_dummy.py @@ -73,7 +73,6 @@ def main(): n_checkpoint = n interface.mark_action_fulfilled(precice.action_write_iteration_checkpoint()) - # Read porosity and apply for name, dim in read_data_names.items(): if dim == 0: read_scalar_data = interface.read_block_scalar_data(read_data_ids[name], vertex_ids) diff --git a/examples/macro-micro-dummy/precice-config.xml b/examples/macro-micro-dummy/precice-config.xml index 3362ffc6..9c487c7c 100644 --- a/examples/macro-micro-dummy/precice-config.xml +++ b/examples/macro-micro-dummy/precice-config.xml @@ -41,31 +41,18 @@ - + - + - + - - + + - - - - - - - - - - - - - - + From ace86f04f1f5a46e0eecf2d242bbcfe8e6953889 Mon Sep 17 00:00:00 2001 From: erikscheurer <84399192+erikscheurer@users.noreply.github.com> Date: Mon, 6 Mar 2023 17:37:28 +0100 Subject: [PATCH 08/87] Add C++ bindings for manager API using pybind11 and also a corresponding solverdummy (#22) * Add c++ dummy and move files * Add readme * Modify test action and add c++ dummy * Add some documentation * Rename micro_cpp_dummy file * Update action with new filename * Apply suggestions from code review Co-authored-by: Ishaan Desai * Remove `get_dims` * Remove `_dims` * Move `micro-manager-config.json` to unify examples * Separate into c++ code into header file and code --------- Co-authored-by: Ishaan Desai --- .github/workflows/run-macro-micro-dummy.yml | 13 +++- .gitignore | 3 + examples/{macro-micro-dummy => }/.gitignore | 0 examples/README.md | 42 ++++++++++ .../{macro-micro-dummy => }/clean-example.sh | 0 examples/cpp-dummy/.gitignore | 11 +++ examples/cpp-dummy/micro_cpp_dummy.cpp | 78 +++++++++++++++++++ examples/cpp-dummy/micro_cpp_dummy.hpp | 29 +++++++ .../run_micro_manager.py | 2 +- examples/macro-micro-dummy/README.md | 29 ------- .../{macro-micro-dummy => }/macro_dummy.py | 0 .../micro-manager-config.json | 2 +- .../precice-config.xml | 0 examples/python-dummy/.gitignore | 8 ++ .../micro_dummy.py | 0 examples/python-dummy/run_micro_manager.py | 11 +++ 16 files changed, 194 insertions(+), 34 deletions(-) rename examples/{macro-micro-dummy => }/.gitignore (100%) create mode 100644 examples/README.md rename examples/{macro-micro-dummy => }/clean-example.sh (100%) create mode 100644 examples/cpp-dummy/.gitignore create mode 100644 examples/cpp-dummy/micro_cpp_dummy.cpp create mode 100644 examples/cpp-dummy/micro_cpp_dummy.hpp rename examples/{macro-micro-dummy => cpp-dummy}/run_micro_manager.py (68%) delete 
mode 100644 examples/macro-micro-dummy/README.md rename examples/{macro-micro-dummy => }/macro_dummy.py (100%) rename examples/{macro-micro-dummy => }/micro-manager-config.json (90%) rename examples/{macro-micro-dummy => }/precice-config.xml (100%) create mode 100644 examples/python-dummy/.gitignore rename examples/{macro-micro-dummy => python-dummy}/micro_dummy.py (100%) create mode 100644 examples/python-dummy/run_micro_manager.py diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index 6a16a6ad..3ebd60ef 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -22,7 +22,14 @@ jobs: pip install setuptools wheel twine - name: Install micro-manager run: pip3 install --user . - - name: Run macro-micro dummy + - name: Run python macro-micro dummy run: | - cd examples/macro-micro-dummy/ - python3 macro_dummy.py & python3 run_micro_manager.py + cd examples/ + python3 macro_dummy.py & python3 python-dummy/run_micro_manager.py + - name: Run c++ macro-micro dummy + run: | + cd examples/cpp-dummy/ + pip install pybind11 + c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) + cd ../ + python3 macro_dummy.py & python3 cpp-dummy/run_micro_manager.py diff --git a/.gitignore b/.gitignore index e9ecfc7a..ec86acfa 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,6 @@ micro_manager.egg-info/ # Packaging artifacts dist + +# vscode +.vscode \ No newline at end of file diff --git a/examples/macro-micro-dummy/.gitignore b/examples/.gitignore similarity index 100% rename from examples/macro-micro-dummy/.gitignore rename to examples/.gitignore diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..4ee6babe --- /dev/null +++ b/examples/README.md @@ -0,0 +1,42 @@ +# Solverdummies + +The `solverdummies` are minimal working examples for using the preCICE Micro Manager with different languages. At the moment, there are examples for Python, and C++. They can be coupled with any other solver, for example the `macro-dummy.py` in this directory. + +## Python +To run the Python solverdummies, run the following commands in the `examples/` directory in **two different terminals**: + +```bash +python macro-dummy.py +python python-dummy/run_micro_manager.py +``` + +## C++ +The C++ solverdummies have to be compiled first using [`pybind11`](https://pybind11.readthedocs.io/en/stable/index.html). To do so, install `pybind11` using `pip`: + +```bash +pip install pybind11 +``` + +Then, run the following commands in the `cpp-dummy` directory: + +```bash +c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) +``` +
+Explanation + +The command above compiles the C++ solverdummy and creates a shared library that can be imported from python using `pybind11`. +- The `$(python3 -m pybind11 --includes)` part is necessary to include the correct header files for `pybind11`. +- The `$(python3-config --extension-suffix)` part is necessary to create the correct file extension for the shared library. For more information, see the [pybind11 documentation](https://pybind11.readthedocs.io/en/stable/compiling.html#building-manually). + +
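Before wiring the compiled dummy into a coupled run, it can help to confirm that Python can load it. A minimal check, assuming the shared library was just built in the `cpp-dummy` directory (this mirrors the note at the bottom of `micro_cpp_dummy.cpp`):

```python
# Quick sanity check that the compiled pybind11 module is importable.
# Run from the cpp-dummy directory where micro_dummy*.so was built.
import micro_dummy

sim = micro_dummy.MicroSimulation(1)
sim.initialize()
print("micro_dummy imported and MicroSimulation(1) initialized")
```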
+ +Then, run the following commands in the `examples/` directory, in **two different terminals**: + +```bash +python macro_dummy.py +python cpp-dummy/run_micro_manager.py +``` + +When changing the C++ solverdummy to your own solver, make sure to change the `PYBIND11_MODULE` in `micro_cpp_dummy.cpp` to the name that you want to compile to. +For example, if you want to import the module as `my_solver`, change the line to `PYBIND11_MODULE(my_solver, m) {`. Then, change the `micro_file_name` in `micro-manager-config.json` to `my_solver`. \ No newline at end of file diff --git a/examples/macro-micro-dummy/clean-example.sh b/examples/clean-example.sh similarity index 100% rename from examples/macro-micro-dummy/clean-example.sh rename to examples/clean-example.sh diff --git a/examples/cpp-dummy/.gitignore b/examples/cpp-dummy/.gitignore new file mode 100644 index 00000000..9604e78e --- /dev/null +++ b/examples/cpp-dummy/.gitignore @@ -0,0 +1,11 @@ +# preCICE related files +*.log +*events.json +precice-run/ + +# PyCharm related files +.idea +__pycache__ + +# Compiled files +*.so \ No newline at end of file diff --git a/examples/cpp-dummy/micro_cpp_dummy.cpp b/examples/cpp-dummy/micro_cpp_dummy.cpp new file mode 100644 index 00000000..8c43b062 --- /dev/null +++ b/examples/cpp-dummy/micro_cpp_dummy.cpp @@ -0,0 +1,78 @@ +// Micro simulation +// In this file we solve a dummy micro problem to show how to adjust the macro-micro coupling +// This dummy is written in C++ and is bound to python using pybind11 + +#include "micro_cpp_dummy.hpp" + +// Constructor +MicroSimulation::MicroSimulation(int sim_id) : _sim_id(sim_id), _micro_scalar_data(0), _checkpoint(0) {} + +// Initialize +void MicroSimulation::initialize() +{ + std::cout << "Initialize micro problem (" << _sim_id << ")\n"; + _micro_scalar_data = 0; + _micro_vector_data.clear(); + _checkpoint = 0; +} + +// Solve +py::dict MicroSimulation::solve(py::dict macro_data, double dt) +{ + std::cout << "Solve timestep of micro problem (" << _sim_id << ")\n"; + + + //! Insert your solving routine here, changing the data and casting it to the correct type + + // create double variable from macro_data["micro_scalar_data"]; which is a python float + double macro_scalar_data = macro_data["macro-scalar-data"].cast(); + + // macro_write_data["micro_vector_data"] is a numpy array + py::array_t macro_vector_data = macro_data["macro-vector-data"].cast>(); + _micro_vector_data = std::vector(macro_vector_data.data(), macro_vector_data.data() + macro_vector_data.size()); // convert numpy array to std::vector. 
+ + // Change data + _micro_scalar_data = macro_scalar_data + 1.; + for (uint i = 0; i < _micro_vector_data.size(); i++) + { + _micro_vector_data[i] += 1.; + } + + // Convert data to a py::dict again to send it back to the Micro Manager + py::dict micro_write_data; + // add micro_scalar_data and micro_vector_data to micro_write_data + micro_write_data["micro-scalar-data"] = _micro_scalar_data; + micro_write_data["micro-vector-data"] = _micro_vector_data; // numpy array is automatically converted to python list + + // return micro_write_data + return micro_write_data; +} +// Save Checkpoint +void MicroSimulation::save_checkpoint() +{ + std::cout << "Saving state of micro problem (" << _sim_id << ")\n"; + _checkpoint = _micro_scalar_data; +} + +// Reload Checkpoint +void MicroSimulation::reload_checkpoint() +{ + std::cout << "Reverting to old state of micro problem (" << _sim_id << ")\n"; + _micro_scalar_data = _checkpoint; +} + +PYBIND11_MODULE(micro_dummy, m) { + // optional docstring + m.doc() = "pybind11 micro dummy plugin"; + + py::class_(m, "MicroSimulation") + .def(py::init()) + .def("initialize", &MicroSimulation::initialize) + .def("solve", &MicroSimulation::solve) + .def("save_checkpoint", &MicroSimulation::save_checkpoint) + .def("reload_checkpoint", &MicroSimulation::reload_checkpoint); +} + +// compile with +// c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) +// To check if python is able to import it, run python3 -c "import micro_dummy; micro_dummy.MicroSimulation(1)" from the same directory \ No newline at end of file diff --git a/examples/cpp-dummy/micro_cpp_dummy.hpp b/examples/cpp-dummy/micro_cpp_dummy.hpp new file mode 100644 index 00000000..9bca6015 --- /dev/null +++ b/examples/cpp-dummy/micro_cpp_dummy.hpp @@ -0,0 +1,29 @@ +// This is the header file for the micro simulation class. +// It is included in the micro_cpp_dummy.cpp file and the micro_cpp_dummy.cpp file is compiled with pybind11 to create a python module. +// The python module is then imported in the Micro Manager. 
+ +#pragma once +#include +#include +#include +#include // numpy arrays +#include // std::vector conversion + +namespace py = pybind11; + +class MicroSimulation +{ +public: + MicroSimulation(int sim_id); + void initialize(); + // solve takes python dict for macro_write data, dt, and returns python dict for macro_read data + py::dict solve(py::dict macro_write_data, double dt); + void save_checkpoint(); + void reload_checkpoint(); + +private: + int _sim_id; + double _micro_scalar_data; + std::vector _micro_vector_data; + double _checkpoint; +}; diff --git a/examples/macro-micro-dummy/run_micro_manager.py b/examples/cpp-dummy/run_micro_manager.py similarity index 68% rename from examples/macro-micro-dummy/run_micro_manager.py rename to examples/cpp-dummy/run_micro_manager.py index bfb69ab5..eee9dbe9 100644 --- a/examples/macro-micro-dummy/run_micro_manager.py +++ b/examples/cpp-dummy/run_micro_manager.py @@ -4,7 +4,7 @@ from micro_manager import MicroManager -manager = MicroManager("./micro-manager-config.json") +manager = MicroManager("../micro-manager-config.json") manager.initialize() diff --git a/examples/macro-micro-dummy/README.md b/examples/macro-micro-dummy/README.md deleted file mode 100644 index e3cd3eb9..00000000 --- a/examples/macro-micro-dummy/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Install Dependencies - -* [preCICE](https://github.com/precice/precice) -* [pyprecice](https://github.com/precice/python-bindings) -* [Micro Manager](https://github.com/precice/micro-manager) - -# Run - -Run the dummy macro solver by running - -```bash -python3 macro_solver.py -``` - -Run the Micro Manager by running - -```bash -micro_manager micro-manager-config.json -``` - -or - -```bash -python3 run_micro_manager.py -``` - -# Next Steps - -If you want to couple any other solver against this dummy solver be sure to adjust the preCICE configuration (participant names, mesh names, data names etc.) to the needs of your solver, compare our [step-by-step guide for new adapters](https://github.com/precice/precice/wiki/Adapter-Example). 
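For orientation, the example directory after the renames listed in this patch looks roughly as follows (assembled from the rename and create entries above; the `.gitignore` files in `examples/`, `cpp-dummy/` and `python-dummy/` are not shown):

```text
examples/
├── README.md
├── clean-example.sh
├── macro_dummy.py
├── micro-manager-config.json
├── precice-config.xml
├── cpp-dummy/
│   ├── micro_cpp_dummy.cpp
│   ├── micro_cpp_dummy.hpp
│   └── run_micro_manager.py
└── python-dummy/
    ├── micro_dummy.py
    └── run_micro_manager.py
```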
diff --git a/examples/macro-micro-dummy/macro_dummy.py b/examples/macro_dummy.py similarity index 100% rename from examples/macro-micro-dummy/macro_dummy.py rename to examples/macro_dummy.py diff --git a/examples/macro-micro-dummy/micro-manager-config.json b/examples/micro-manager-config.json similarity index 90% rename from examples/macro-micro-dummy/micro-manager-config.json rename to examples/micro-manager-config.json index 390c2f2b..fd52c953 100644 --- a/examples/macro-micro-dummy/micro-manager-config.json +++ b/examples/micro-manager-config.json @@ -1,7 +1,7 @@ { "micro_file_name": "micro_dummy", "coupling_params": { - "config_file_name": "precice-config.xml", + "config_file_name": "./precice-config.xml", "macro_mesh_name": "macro-mesh", "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} diff --git a/examples/macro-micro-dummy/precice-config.xml b/examples/precice-config.xml similarity index 100% rename from examples/macro-micro-dummy/precice-config.xml rename to examples/precice-config.xml diff --git a/examples/python-dummy/.gitignore b/examples/python-dummy/.gitignore new file mode 100644 index 00000000..cc479d19 --- /dev/null +++ b/examples/python-dummy/.gitignore @@ -0,0 +1,8 @@ +# preCICE related files +*.log +*events.json +precice-run/ + +# PyCharm related files +.idea +__pycache__ diff --git a/examples/macro-micro-dummy/micro_dummy.py b/examples/python-dummy/micro_dummy.py similarity index 100% rename from examples/macro-micro-dummy/micro_dummy.py rename to examples/python-dummy/micro_dummy.py diff --git a/examples/python-dummy/run_micro_manager.py b/examples/python-dummy/run_micro_manager.py new file mode 100644 index 00000000..eee9dbe9 --- /dev/null +++ b/examples/python-dummy/run_micro_manager.py @@ -0,0 +1,11 @@ +""" +Script to run the Micro Manager +""" + +from micro_manager import MicroManager + +manager = MicroManager("../micro-manager-config.json") + +manager.initialize() + +manager.solve() From 95db10482c52854c7f8b660e8ad5f9757a8c72b4 Mon Sep 17 00:00:00 2001 From: erikscheurer <84399192+erikscheurer@users.noreply.github.com> Date: Mon, 13 Mar 2023 13:56:58 +0100 Subject: [PATCH 09/87] Do some syntax changes (#26) * Do some syntax changes * Not use // as it may return float * Undo copy commit --- micro_manager/adaptivity.py | 13 +++---------- micro_manager/config.py | 18 +++++------------- micro_manager/micro_manager.py | 5 +---- 3 files changed, 9 insertions(+), 27 deletions(-) diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index 4ff3dfd2..444e7772 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -49,10 +49,8 @@ def get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: np elif data.ndim == 2: _, dim = data.shape - counter_1 = 0 - for id_1 in range(self._number_of_sims): - counter_2 = 0 - for id_2 in range(self._number_of_sims): + for counter_1, id_1 in enumerate(range(self._number_of_sims)): + for counter_2, id_2 in enumerate(range(self._number_of_sims)): data_diff = 0 if id_1 != id_2: if dim: @@ -64,8 +62,6 @@ def get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: np _similarity_dists[id_1, id_2] += dt * data_diff else: _similarity_dists[id_1, id_2] = 0 - counter_2 += 1 - counter_1 += 1 return _similarity_dists @@ -188,12 +184,9 @@ def _check_for_activation( micro_sim_states : numpy array 1D array having state (active or inactive) of each micro simulation """ 
- dists = [] - active_sim_ids = np.where(micro_sim_states == 1)[0] - for active_id in active_sim_ids: - dists.append(similarity_dists[inactive_id, active_id]) + dists = similarity_dists[inactive_id, active_sim_ids] # If inactive sim is not similar to any active sim, activate it return min(dists) > self._ref_tol diff --git a/micro_manager/config.py b/micro_manager/config.py index 2b431602..2b6b7a8f 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -55,17 +55,11 @@ def read_json(self, config_filename): """ folder = os.path.dirname(os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]), config_filename)) path = os.path.join(folder, os.path.basename(config_filename)) - read_file = open(path, "r") - data = json.load(read_file) - - self._micro_file_name = data["micro_file_name"] - i = 0 - micro_filename = list(self._micro_file_name) - for c in micro_filename: - if c == '/': - micro_filename[i] = '.' - i += 1 - self._micro_file_name = ''.join(micro_filename) + with open(path, "r") as read_file: + data = json.load(read_file) + + # convert paths to python-importable paths + self._micro_file_name = data["micro_file_name"].replace("/", ".").replace("\\", ".").replace(".py", "") self._config_file_name = os.path.join(folder, data["coupling_params"]["config_file_name"]) self._macro_mesh_name = data["coupling_params"]["macro_mesh_name"] @@ -155,8 +149,6 @@ def read_json(self, config_filename): except BaseException: print("Micro manager will not output time required to solve each micro simulation in each time step.") - read_file.close() - def get_config_file_name(self): """ Get the name of the JSON configuration file. diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 4cc72079..38751e69 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -251,10 +251,7 @@ def initialize(self) -> None: self._global_number_of_micro_sims = np.sum(nms_all_ranks) # Create all micro simulations - sim_id = 0 - if self._rank != 0: - for i in range(self._rank - 1, -1, -1): - sim_id += nms_all_ranks[i] + sim_id = np.sum(nms_all_ranks[:self._rank]) self._micro_sims = [] self._micro_sim_global_ids = [] From 9a06a5f60d2c3ef11451ee7e6bd4bb5cf3312f3f Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 17 Mar 2023 18:48:26 +0100 Subject: [PATCH 10/87] Fix bug in adaptivity association step (#27) * Make a copy of the active sim object which is associated to an inactive sim and put it in the list * Copy list of micro simulations in the association step and return the copied instance * Reduce total time of adaptivity integration test from 10 to 2 * Properly handling copying of active micro sim objects to the associated inactive ones --- micro_manager/adaptivity.py | 23 +++++++++++++++++-- micro_manager/micro_manager.py | 4 ++-- .../test_adaptivity/precice-config.xml | 2 +- 3 files changed, 24 insertions(+), 5 deletions(-) diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index 444e7772..ca9fb27f 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -3,6 +3,7 @@ """ import numpy as np import sys +from copy import deepcopy class AdaptiveController: @@ -195,7 +196,7 @@ def associate_inactive_to_active( self, similarity_dists: np.ndarray, micro_sim_states: np.ndarray, - micro_sims: list) -> None: + micro_sims: list) -> list: """ Associate inactive micro simulations to most similar active micro simulation. 
@@ -208,6 +209,8 @@ def associate_inactive_to_active( micro_sims : list List of objects of class MicroProblem, which are the micro simulations """ + _micro_sims = micro_sims.copy() + active_sim_ids = np.where(micro_sim_states == 1)[0] inactive_sim_ids = np.where(micro_sim_states == 0)[0] @@ -219,4 +222,20 @@ def associate_inactive_to_active( if similarity_dists[inactive_id, active_id] < dist_min: most_similar_active_id = active_id dist_min = similarity_dists[inactive_id, active_id] - micro_sims[inactive_id].is_most_similar_to(most_similar_active_id) + + # Only copy active micro sim object if the inactive sim is associated to a + # different active micro sim than in t_{n-1} + if most_similar_active_id != _micro_sims[inactive_id].get_most_similar_active_id(): + # Effectively kill the micro sim object associated to the inactive ID + _micro_sims[inactive_id] = None + + # Make a copy of the micro sim object associated to the active ID and add + # it at the correct location in the list micro_sims + _micro_sims[inactive_id] = deepcopy(micro_sims[most_similar_active_id]) + + # Redo the deactivation and association step because an active sim object + # has been copied over, so its properties are still those of an active sim + _micro_sims[inactive_id].deactivate() + _micro_sims[inactive_id].is_most_similar_to(most_similar_active_id) + + return _micro_sims diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 38751e69..feb34d92 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -361,7 +361,7 @@ def write_data_to_precice(self, micro_sims_output: list) -> None: def compute_adaptivity(self, similarity_dists_nm1: np.ndarray, micro_sim_states_nm1: np.ndarray): """ - Compute adaptivity based on similartiy distances and micro simulation states from t_{n-1} + Compute adaptivity based on similarity distances and micro simulation states from t_{n-1} Parameters ---------- @@ -391,7 +391,7 @@ def compute_adaptivity(self, similarity_dists_nm1: np.ndarray, micro_sim_states_ micro_sim_states_n = self._adaptivity_controller.update_inactive_micro_sims( similarity_dists_n, micro_sim_states_n, self._micro_sims) - self._adaptivity_controller.associate_inactive_to_active( + self._micro_sims = self._adaptivity_controller.associate_inactive_to_active( similarity_dists_n, micro_sim_states_n, self._micro_sims) assert np.any(micro_sim_states_n), "There are no active simulations, which is not possible." 
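Read on its own, the association step in this patch picks, for every inactive simulation, the active simulation with the smallest entry in its row of the distance matrix (the copying of the associated active simulation object is omitted here). A small self-contained sketch with made-up values, written as an equivalent arg-min formulation rather than the Manager's own loop:

```python
# Sketch of the association step: each inactive simulation is mapped to the
# active simulation with the smallest similarity distance. Values are made up.
import numpy as np

similarity_dists = np.array([[0.0, 0.2, 0.9],
                             [0.2, 0.0, 0.7],
                             [0.9, 0.7, 0.0]])
micro_sim_states = np.array([1, 0, 1])  # simulations 0 and 2 are active

active_sim_ids = np.where(micro_sim_states == 1)[0]
inactive_sim_ids = np.where(micro_sim_states == 0)[0]

for inactive_id in inactive_sim_ids:
    row = similarity_dists[inactive_id, active_sim_ids]
    associated_active_id = active_sim_ids[np.argmin(row)]
    print(f"inactive sim {inactive_id} -> active sim {associated_active_id}")
    # Here: inactive sim 1 is associated to active sim 0 (distance 0.2 < 0.7).
```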
diff --git a/tests/integration/test_adaptivity/precice-config.xml b/tests/integration/test_adaptivity/precice-config.xml index aad23097..a5072746 100644 --- a/tests/integration/test_adaptivity/precice-config.xml +++ b/tests/integration/test_adaptivity/precice-config.xml @@ -50,7 +50,7 @@ - + From 0ac7a15f18f7bbbe293c2dd50d501a0e9063bef6 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Sun, 26 Mar 2023 17:34:25 +0200 Subject: [PATCH 11/87] Adjust configuration so that adaptivity can be switched off without removing all settings --- micro_manager/config.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/micro_manager/config.py b/micro_manager/config.py index 2b6b7a8f..14f2639f 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -99,13 +99,15 @@ def read_json(self, config_filename): "in every time window.") try: - adaptivity = data["simulation_params"]["adaptivity"] - - if adaptivity == "True": + if data["simulation_params"]["adaptivity"] == "True": self._adaptivity = True - elif adaptivity == "False": + elif data["simulation_params"]["adaptivity"] == "False": self._adaptivity = False + except BaseException: + print("Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations " + "in all time steps.") + if self._adaptivity: exchange_data = {**self._read_data_names, **self._write_data_names} for dname in data["simulation_params"]["adaptivity_data"]: self._data_for_adaptivity[dname] = exchange_data[dname] @@ -125,9 +127,6 @@ def read_json(self, config_filename): self._write_data_names["active_state"] = False self._write_data_names["active_steps"] = False - except BaseException: - print("Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations " - "in all time steps.") try: diagnostics_data_names = data["diagnostics"]["data_from_micro_sims"] From 9fce27172235e5b2a93efa77726a69b3d7bb1ef9 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Thu, 30 Mar 2023 11:22:08 +0200 Subject: [PATCH 12/87] Copy active micro simulation object to inactive one when it is being reactivated --- micro_manager/adaptivity.py | 26 +++++++------------------- micro_manager/micro_manager.py | 2 +- 2 files changed, 8 insertions(+), 20 deletions(-) diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index ca9fb27f..2f8c2294 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -163,7 +163,12 @@ def update_inactive_micro_sims( for i in range(self._number_of_sims): if not _micro_sim_states[i]: # if id is inactive if self._check_for_activation(i, similarity_dists, _micro_sim_states): - micro_sims[i].activate() + # Effectively kill the micro sim object associated to the inactive ID + micro_sims[i] = None + + # Make a copy of the associated active micro sim object + micro_sims[i] = deepcopy(micro_sims[micro_sims[i].get_most_similar_active_id()]) + _micro_sim_states[i] = 1 return _micro_sim_states @@ -209,8 +214,6 @@ def associate_inactive_to_active( micro_sims : list List of objects of class MicroProblem, which are the micro simulations """ - _micro_sims = micro_sims.copy() - active_sim_ids = np.where(micro_sim_states == 1)[0] inactive_sim_ids = np.where(micro_sim_states == 0)[0] @@ -223,19 +226,4 @@ def associate_inactive_to_active( most_similar_active_id = active_id dist_min = similarity_dists[inactive_id, active_id] - # Only copy active micro sim object if the inactive sim is associated to a - # different active micro sim than in t_{n-1} - if 
most_similar_active_id != _micro_sims[inactive_id].get_most_similar_active_id(): - # Effectively kill the micro sim object associated to the inactive ID - _micro_sims[inactive_id] = None - - # Make a copy of the micro sim object associated to the active ID and add - # it at the correct location in the list micro_sims - _micro_sims[inactive_id] = deepcopy(micro_sims[most_similar_active_id]) - - # Redo the deactivation and association step because an active sim object - # has been copied over, so its properties are still those of an active sim - _micro_sims[inactive_id].deactivate() - _micro_sims[inactive_id].is_most_similar_to(most_similar_active_id) - - return _micro_sims + micro_sims[inactive_id].is_most_similar_to(most_similar_active_id) diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index feb34d92..536eb499 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -391,7 +391,7 @@ def compute_adaptivity(self, similarity_dists_nm1: np.ndarray, micro_sim_states_ micro_sim_states_n = self._adaptivity_controller.update_inactive_micro_sims( similarity_dists_n, micro_sim_states_n, self._micro_sims) - self._micro_sims = self._adaptivity_controller.associate_inactive_to_active( + self._adaptivity_controller.associate_inactive_to_active( similarity_dists_n, micro_sim_states_n, self._micro_sims) assert np.any(micro_sim_states_n), "There are no active simulations, which is not possible." From a2d3b8e968b8d9579153fe014d95a0b313e49e0b Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Thu, 30 Mar 2023 11:53:08 +0200 Subject: [PATCH 13/87] Move initiliazation of micro sim states outside of adaptivity calculation --- micro_manager/adaptivity.py | 9 +++------ micro_manager/micro_manager.py | 12 ++++++++++-- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index 2f8c2294..fda40c1e 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -155,20 +155,17 @@ def update_inactive_micro_sims( _micro_sim_states = np.copy(micro_sim_states) # Input micro_sim_states is not longer used after this point - if not np.any(_micro_sim_states): - micro_sims[0].activate() - _micro_sim_states[0] = 1 # If all sims are inactive, activate the first one (a random choice) - # Update the set of inactive micro sims for i in range(self._number_of_sims): if not _micro_sim_states[i]: # if id is inactive if self._check_for_activation(i, similarity_dists, _micro_sim_states): + associated_active_id = micro_sims[i].get_most_similar_active_id() + # Effectively kill the micro sim object associated to the inactive ID micro_sims[i] = None # Make a copy of the associated active micro sim object - micro_sims[i] = deepcopy(micro_sims[micro_sims[i].get_most_similar_active_id()]) - + micro_sims[i] = deepcopy(micro_sims[associated_active_id]) _micro_sim_states[i] = 1 return _micro_sim_states diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 536eb499..532a0ec6 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -38,7 +38,7 @@ def __init__(self, local_id, global_id): base_micro_simulation.__init__(self, local_id) self._local_id = local_id self._global_id = global_id - self._is_active = False + self._is_active = False # Simulation is created in an inactive state self._most_similar_active_local_id = None def get_local_id(self): @@ -475,12 +475,20 @@ def solve(self): t, n = 0, 0 t_checkpoint, n_checkpoint = 0, 0 similarity_dists = 
np.zeros((self._local_number_of_micro_sims, self._local_number_of_micro_sims)) - micro_sim_states = np.ones((self._local_number_of_micro_sims)) # Start with all active simulations + micro_sim_states = np.ones((self._local_number_of_micro_sims)) # By default all sims are active if self._is_adaptivity_on: # Start adaptivity calculation with all sims inactive micro_sim_states = np.zeros((self._local_number_of_micro_sims)) + # If all sims are inactive, activate the first one (a random choice) + self._micro_sims[0].activate() + micro_sim_states[0] = 1 + + # All inactive sims are associated to the one active sim + for i in range(1, self._local_number_of_micro_sims): + self._micro_sims[i].is_most_similar_to(0) + similarity_dists_cp = None micro_sim_states_cp = None micro_sims_cp = None From eebc52394122888d13d81facda11d5b8a5a0fbb4 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 31 Mar 2023 12:47:50 +0200 Subject: [PATCH 14/87] Change names of association step functions and variables --- micro_manager/adaptivity.py | 6 +++--- micro_manager/micro_manager.py | 18 +++++++++--------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index fda40c1e..a7b69117 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -159,7 +159,7 @@ def update_inactive_micro_sims( for i in range(self._number_of_sims): if not _micro_sim_states[i]: # if id is inactive if self._check_for_activation(i, similarity_dists, _micro_sim_states): - associated_active_id = micro_sims[i].get_most_similar_active_id() + associated_active_id = micro_sims[i].get_associated_active_id() # Effectively kill the micro sim object associated to the inactive ID micro_sims[i] = None @@ -220,7 +220,7 @@ def associate_inactive_to_active( for active_id in active_sim_ids: # Find most similar active sim for every inactive sim if similarity_dists[inactive_id, active_id] < dist_min: - most_similar_active_id = active_id + associated_active_id = active_id dist_min = similarity_dists[inactive_id, active_id] - micro_sims[inactive_id].is_most_similar_to(most_similar_active_id) + micro_sims[inactive_id].is_associated_to(associated_active_id) \ No newline at end of file diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 532a0ec6..d88c6cb9 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -39,7 +39,7 @@ def __init__(self, local_id, global_id): self._local_id = local_id self._global_id = global_id self._is_active = False # Simulation is created in an inactive state - self._most_similar_active_local_id = None + self._associated_active_local_id = None def get_local_id(self): return self._local_id @@ -53,15 +53,15 @@ def activate(self): def deactivate(self): self._is_active = False - def is_most_similar_to(self, similar_active_local_id): + def is_associated_to(self, similar_active_local_id): assert not self._is_active, "Micro simulation {} is active and hence cannot be most similar to another active simulation".format( self._global_id) - self._most_similar_active_local_id = similar_active_local_id + self._associated_active_local_id = similar_active_local_id - def get_most_similar_active_id(self): + def get_associated_active_id(self): assert not self._is_active, "Micro simulation {} is active and hence cannot have a most similar active id".format( self._global_id) - return self._most_similar_active_local_id + return self._associated_active_local_id def is_active(self): return self._is_active @@ -449,10 
+449,10 @@ def solve_micro_simulations(self, micro_sims_input: list, micro_sim_states: np.n for inactive_id in inactive_sim_ids: # self._logger.info("Micro sim [{}] is inactive. Copying data from most similar active micro sim [{}]".format( # self._micro_sims[inactive_id].get_global_id(), - # self._micro_sim_global_ids[self._micro_sims[inactive_id].get_most_similar_active_id()])) + # self._micro_sim_global_ids[self._micro_sims[inactive_id].get_associated_active_id()])) micro_sims_output[inactive_id] = dict() - for dname, values in micro_sims_output[self._micro_sims[inactive_id].get_most_similar_active_id()].items(): + for dname, values in micro_sims_output[self._micro_sims[inactive_id].get_associated_active_id()].items(): micro_sims_output[inactive_id][dname] = values if self._is_adaptivity_on: @@ -487,7 +487,7 @@ def solve(self): # All inactive sims are associated to the one active sim for i in range(1, self._local_number_of_micro_sims): - self._micro_sims[i].is_most_similar_to(0) + self._micro_sims[i].is_associated_to(0) similarity_dists_cp = None micro_sim_states_cp = None @@ -551,7 +551,7 @@ def solve(self): self._interface.mark_action_fulfilled( precice.action_read_iteration_checkpoint()) else: # Time window has converged, now micro output can be generated - self._logger.info("Micro simulations {} - {}: time window t = {} has converged".format( + self._logger.info("Micro simulations {} - {} have converged at t = {}".format( self._micro_sims[0].get_global_id(), self._micro_sims[-1].get_global_id(), t)) if self._micro_sims_have_output: From 41fba1e5e5f45d4d67e32fdfbb2f450f3009f142 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 31 Mar 2023 12:56:07 +0200 Subject: [PATCH 15/87] Formatting --- micro_manager/adaptivity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index a7b69117..5152bbd0 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -223,4 +223,4 @@ def associate_inactive_to_active( associated_active_id = active_id dist_min = similarity_dists[inactive_id, active_id] - micro_sims[inactive_id].is_associated_to(associated_active_id) \ No newline at end of file + micro_sims[inactive_id].is_associated_to(associated_active_id) From 2cf45530c921950db869035e34d54372bad5aa2e Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Thu, 13 Apr 2023 12:39:23 +0200 Subject: [PATCH 16/87] Formatting the C++ solver dummy --- examples/cpp-dummy/micro_cpp_dummy.cpp | 30 +++++++++++++++----------- examples/cpp-dummy/micro_cpp_dummy.hpp | 2 +- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/examples/cpp-dummy/micro_cpp_dummy.cpp b/examples/cpp-dummy/micro_cpp_dummy.cpp index 8c43b062..a2335cae 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.cpp +++ b/examples/cpp-dummy/micro_cpp_dummy.cpp @@ -1,6 +1,14 @@ // Micro simulation -// In this file we solve a dummy micro problem to show how to adjust the macro-micro coupling -// This dummy is written in C++ and is bound to python using pybind11 +// In this file we solve a dummy micro problem which is controlled by the Micro Manager +// This dummy is written in C++ and is controllable via Python using pybind11 +// +// Compile your pybind-11 wrapped code with: +// +// c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) +// +// To check if python is able to import it, run: +// python3 -c "import micro_dummy; micro_dummy.MicroSimulation(1)" +// from 
the same directory #include "micro_cpp_dummy.hpp" @@ -21,13 +29,12 @@ py::dict MicroSimulation::solve(py::dict macro_data, double dt) { std::cout << "Solve timestep of micro problem (" << _sim_id << ")\n"; + //! Code below shows how to convert input macro data and use it in your C++ solver - //! Insert your solving routine here, changing the data and casting it to the correct type - - // create double variable from macro_data["micro_scalar_data"]; which is a python float + // Create a double from macro_data["micro_scalar_data"], which is a Python float double macro_scalar_data = macro_data["macro-scalar-data"].cast(); - // macro_write_data["micro_vector_data"] is a numpy array + // Create a pybind style Numpy array from macro_write_data["micro_vector_data"], which is a Numpy array py::array_t macro_vector_data = macro_data["macro-vector-data"].cast>(); _micro_vector_data = std::vector(macro_vector_data.data(), macro_vector_data.data() + macro_vector_data.size()); // convert numpy array to std::vector. @@ -40,21 +47,22 @@ py::dict MicroSimulation::solve(py::dict macro_data, double dt) // Convert data to a py::dict again to send it back to the Micro Manager py::dict micro_write_data; + // add micro_scalar_data and micro_vector_data to micro_write_data micro_write_data["micro-scalar-data"] = _micro_scalar_data; micro_write_data["micro-vector-data"] = _micro_vector_data; // numpy array is automatically converted to python list - // return micro_write_data return micro_write_data; } -// Save Checkpoint + +// Save Checkpoint -- only valid for implicit coupling void MicroSimulation::save_checkpoint() { std::cout << "Saving state of micro problem (" << _sim_id << ")\n"; _checkpoint = _micro_scalar_data; } -// Reload Checkpoint +// Reload Checkpoint -- only valid for implicit coupling void MicroSimulation::reload_checkpoint() { std::cout << "Reverting to old state of micro problem (" << _sim_id << ")\n"; @@ -72,7 +80,3 @@ PYBIND11_MODULE(micro_dummy, m) { .def("save_checkpoint", &MicroSimulation::save_checkpoint) .def("reload_checkpoint", &MicroSimulation::reload_checkpoint); } - -// compile with -// c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) -// To check if python is able to import it, run python3 -c "import micro_dummy; micro_dummy.MicroSimulation(1)" from the same directory \ No newline at end of file diff --git a/examples/cpp-dummy/micro_cpp_dummy.hpp b/examples/cpp-dummy/micro_cpp_dummy.hpp index 9bca6015..1a767adc 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.hpp +++ b/examples/cpp-dummy/micro_cpp_dummy.hpp @@ -16,7 +16,7 @@ class MicroSimulation public: MicroSimulation(int sim_id); void initialize(); - // solve takes python dict for macro_write data, dt, and returns python dict for macro_read data + // solve takes a python dict data, and the timestep dt as inputs, and returns a python dict py::dict solve(py::dict macro_write_data, double dt); void save_checkpoint(); void reload_checkpoint(); From b599cad188fc76f37f819fe811b9e0d28fd1663e Mon Sep 17 00:00:00 2001 From: Erik Scheurer Date: Thu, 13 Apr 2023 20:02:50 +0200 Subject: [PATCH 17/87] Add test for solverdummy with adaptivity --- .github/workflows/run-macro-micro-dummy.yml | 13 ++++ .../cpp-dummy/run_micro_manager_adaptivity.py | 11 ++++ examples/macro_dummy.py | 4 ++ examples/micro-manager-adaptivity-config.json | 21 ++++++ examples/precice-config-adaptivity.xml | 64 +++++++++++++++++++ .../run_micro_manager_adaptivity.py | 11 ++++ 6 
files changed, 124 insertions(+) create mode 100644 examples/cpp-dummy/run_micro_manager_adaptivity.py create mode 100644 examples/micro-manager-adaptivity-config.json create mode 100644 examples/precice-config-adaptivity.xml create mode 100644 examples/python-dummy/run_micro_manager_adaptivity.py diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index 3ebd60ef..fc18c39d 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -12,20 +12,29 @@ jobs: runs-on: ubuntu-latest container: precice/precice steps: + - name: Checkout Repository uses: actions/checkout@v2 + - name: Install Dependencies run: | apt-get -qq update apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config python -m pip install --upgrade pip pip install setuptools wheel twine + - name: Install micro-manager run: pip3 install --user . + - name: Run python macro-micro dummy run: | cd examples/ python3 macro_dummy.py & python3 python-dummy/run_micro_manager.py + + - name: Run adaptive python macro-micro dummy + run: | + python3 macro_dummy.py & python3 python-dummy/run_micro_manager_adaptivity.py + - name: Run c++ macro-micro dummy run: | cd examples/cpp-dummy/ @@ -33,3 +42,7 @@ jobs: c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) cd ../ python3 macro_dummy.py & python3 cpp-dummy/run_micro_manager.py + + - name: Run adaptive c++ macro-micro dummy + run: | + python3 macro_dummy.py & python3 cpp-dummy/run_micro_manager_adaptivity.py diff --git a/examples/cpp-dummy/run_micro_manager_adaptivity.py b/examples/cpp-dummy/run_micro_manager_adaptivity.py new file mode 100644 index 00000000..a26cb63e --- /dev/null +++ b/examples/cpp-dummy/run_micro_manager_adaptivity.py @@ -0,0 +1,11 @@ +""" +Script to run the Micro Manager +""" + +from micro_manager import MicroManager + +manager = MicroManager("../micro-manager-adaptivity-config.json") + +manager.initialize() + +manager.solve() diff --git a/examples/macro_dummy.py b/examples/macro_dummy.py index fbc5b03d..097c69cd 100644 --- a/examples/macro_dummy.py +++ b/examples/macro_dummy.py @@ -83,6 +83,10 @@ def main(): for i in range(nv): for d in range(interface.get_dimensions()): write_vector_data[i, d] = read_vector_data[i, d] + if t>1: # to trigger adaptivity after some time + # ensure that the data is different from the previous time step + # previously inactive microsimulations will be activated + write_vector_data[i, d] += np.random.randint(0, 10) for name, dim in write_data_names.items(): if dim == 0: diff --git a/examples/micro-manager-adaptivity-config.json b/examples/micro-manager-adaptivity-config.json new file mode 100644 index 00000000..c714a991 --- /dev/null +++ b/examples/micro-manager-adaptivity-config.json @@ -0,0 +1,21 @@ +{ + "micro_file_name": "micro_dummy", + "coupling_params": { + "config_file_name": "./precice-config.xml", + "macro_mesh_name": "macro-mesh", + "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, + "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} + }, + "simulation_params": { + "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0], + "adaptivity": "True", + "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], + "adaptivity_history_param": 0.5, + "adaptivity_coarsening_constant": 0.3, + "adaptivity_refining_constant": 0.4, + 
"adaptivity_every_implicit_iteration": "True" + }, + "diagnostics": { + "output_micro_sim_solve_time": "True" + } +} diff --git a/examples/precice-config-adaptivity.xml b/examples/precice-config-adaptivity.xml new file mode 100644 index 00000000..a3764b26 --- /dev/null +++ b/examples/precice-config-adaptivity.xml @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/python-dummy/run_micro_manager_adaptivity.py b/examples/python-dummy/run_micro_manager_adaptivity.py new file mode 100644 index 00000000..a26cb63e --- /dev/null +++ b/examples/python-dummy/run_micro_manager_adaptivity.py @@ -0,0 +1,11 @@ +""" +Script to run the Micro Manager +""" + +from micro_manager import MicroManager + +manager = MicroManager("../micro-manager-adaptivity-config.json") + +manager.initialize() + +manager.solve() From 961501de03b2705730125d4e246d9dd7435d1773 Mon Sep 17 00:00:00 2001 From: Erik Scheurer Date: Thu, 13 Apr 2023 20:07:56 +0200 Subject: [PATCH 18/87] Correct path --- .github/workflows/run-macro-micro-dummy.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index fc18c39d..0832bba5 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -33,6 +33,7 @@ jobs: - name: Run adaptive python macro-micro dummy run: | + cd examples/ python3 macro_dummy.py & python3 python-dummy/run_micro_manager_adaptivity.py - name: Run c++ macro-micro dummy @@ -45,4 +46,5 @@ jobs: - name: Run adaptive c++ macro-micro dummy run: | + cd examples/ python3 macro_dummy.py & python3 cpp-dummy/run_micro_manager_adaptivity.py From 39e5210e76404d6c5f7d583ec5119a3d48051af6 Mon Sep 17 00:00:00 2001 From: Erik Scheurer Date: Thu, 13 Apr 2023 20:17:48 +0200 Subject: [PATCH 19/87] Change to correct precice-config and format --- examples/macro_dummy.py | 4 ++-- examples/micro-manager-adaptivity-config.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/macro_dummy.py b/examples/macro_dummy.py index 097c69cd..82917f2c 100644 --- a/examples/macro_dummy.py +++ b/examples/macro_dummy.py @@ -83,10 +83,10 @@ def main(): for i in range(nv): for d in range(interface.get_dimensions()): write_vector_data[i, d] = read_vector_data[i, d] - if t>1: # to trigger adaptivity after some time + if t > 1: # to trigger adaptivity after some time # ensure that the data is different from the previous time step # previously inactive microsimulations will be activated - write_vector_data[i, d] += np.random.randint(0, 10) + write_vector_data[i, d] += np.random.randint(0, 10) for name, dim in write_data_names.items(): if dim == 0: diff --git a/examples/micro-manager-adaptivity-config.json b/examples/micro-manager-adaptivity-config.json index c714a991..eb829ea1 100644 --- a/examples/micro-manager-adaptivity-config.json +++ b/examples/micro-manager-adaptivity-config.json @@ -1,7 +1,7 @@ { "micro_file_name": "micro_dummy", "coupling_params": { - "config_file_name": "./precice-config.xml", + "config_file_name": "./precice-config-adaptivity.xml", "macro_mesh_name": "macro-mesh", "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} From 3dba4343b2ae55f01760c34feec3c14fcbe9262a Mon Sep 17 00:00:00 2001 From: Erik Scheurer Date: Thu, 13 Apr 2023 20:25:01 +0200 Subject: 
[PATCH 20/87] Add __deepcopy__ function and switch test order --- .github/workflows/run-macro-micro-dummy.yml | 8 ++++---- examples/cpp-dummy/micro_cpp_dummy.cpp | 18 +++++++++++++++--- examples/cpp-dummy/micro_cpp_dummy.hpp | 1 + 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index 0832bba5..d6c0fe45 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -29,12 +29,12 @@ jobs: - name: Run python macro-micro dummy run: | cd examples/ - python3 macro_dummy.py & python3 python-dummy/run_micro_manager.py + python3 python-dummy/run_micro_manager.py & python3 macro_dummy.py - name: Run adaptive python macro-micro dummy run: | cd examples/ - python3 macro_dummy.py & python3 python-dummy/run_micro_manager_adaptivity.py + python3 python-dummy/run_micro_manager_adaptivity.py & python3 macro_dummy.py - name: Run c++ macro-micro dummy run: | @@ -42,9 +42,9 @@ jobs: pip install pybind11 c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) cd ../ - python3 macro_dummy.py & python3 cpp-dummy/run_micro_manager.py + python3 cpp-dummy/run_micro_manager.py & python3 macro_dummy.py - name: Run adaptive c++ macro-micro dummy run: | cd examples/ - python3 macro_dummy.py & python3 cpp-dummy/run_micro_manager_adaptivity.py + python3 cpp-dummy/run_micro_manager_adaptivity.py & python3 macro_dummy.py diff --git a/examples/cpp-dummy/micro_cpp_dummy.cpp b/examples/cpp-dummy/micro_cpp_dummy.cpp index a2335cae..e60ef687 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.cpp +++ b/examples/cpp-dummy/micro_cpp_dummy.cpp @@ -6,7 +6,7 @@ // // c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) // -// To check if python is able to import it, run: +// To check if python is able to import it, run: // python3 -c "import micro_dummy; micro_dummy.MicroSimulation(1)" // from the same directory @@ -69,7 +69,18 @@ void MicroSimulation::reload_checkpoint() _micro_scalar_data = _checkpoint; } -PYBIND11_MODULE(micro_dummy, m) { +// For adaptivity only: Need to be able to deepcopy the object +MicroSimulation MicroSimulation::__deepcopy__(py::dict memo) +{ + MicroSimulation new_sim(_sim_id); + new_sim._micro_scalar_data = _micro_scalar_data; + new_sim._micro_vector_data = _micro_vector_data; + new_sim._checkpoint = _checkpoint; + return new_sim; +} + +PYBIND11_MODULE(micro_dummy, m) +{ // optional docstring m.doc() = "pybind11 micro dummy plugin"; @@ -78,5 +89,6 @@ PYBIND11_MODULE(micro_dummy, m) { .def("initialize", &MicroSimulation::initialize) .def("solve", &MicroSimulation::solve) .def("save_checkpoint", &MicroSimulation::save_checkpoint) - .def("reload_checkpoint", &MicroSimulation::reload_checkpoint); + .def("reload_checkpoint", &MicroSimulation::reload_checkpoint) + .def("__deepcopy__", &MicroSimulation::__deepcopy__); } diff --git a/examples/cpp-dummy/micro_cpp_dummy.hpp b/examples/cpp-dummy/micro_cpp_dummy.hpp index 1a767adc..b066fa44 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.hpp +++ b/examples/cpp-dummy/micro_cpp_dummy.hpp @@ -20,6 +20,7 @@ class MicroSimulation py::dict solve(py::dict macro_write_data, double dt); void save_checkpoint(); void reload_checkpoint(); + MicroSimulation __deepcopy__(py::dict memo); private: int _sim_id; From 7b99bf1f7198c32a5b65ca358602a65309ca3261 Mon Sep 
17 00:00:00 2001 From: Erik Scheurer Date: Thu, 13 Apr 2023 20:27:18 +0200 Subject: [PATCH 21/87] Add timeout to solverdummies --- .github/workflows/run-macro-micro-dummy.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index d6c0fe45..5d17abe6 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -27,16 +27,19 @@ jobs: run: pip3 install --user . - name: Run python macro-micro dummy + timeout-minutes: 3 run: | cd examples/ python3 python-dummy/run_micro_manager.py & python3 macro_dummy.py - name: Run adaptive python macro-micro dummy + timeout-minutes: 3 run: | cd examples/ python3 python-dummy/run_micro_manager_adaptivity.py & python3 macro_dummy.py - name: Run c++ macro-micro dummy + timeout-minutes: 3 run: | cd examples/cpp-dummy/ pip install pybind11 @@ -45,6 +48,7 @@ jobs: python3 cpp-dummy/run_micro_manager.py & python3 macro_dummy.py - name: Run adaptive c++ macro-micro dummy + timeout-minutes: 3 run: | cd examples/ python3 cpp-dummy/run_micro_manager_adaptivity.py & python3 macro_dummy.py From b9ddb512d64a36647a3fdbc1cc1be1a36dea9b6e Mon Sep 17 00:00:00 2001 From: Erik Scheurer Date: Thu, 13 Apr 2023 20:29:30 +0200 Subject: [PATCH 22/87] Add adaptivity hints to solverdummy readme --- examples/README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/examples/README.md b/examples/README.md index 4ee6babe..6375bb60 100644 --- a/examples/README.md +++ b/examples/README.md @@ -28,6 +28,7 @@ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_c The command above compiles the C++ solverdummy and creates a shared library that can be imported from python using `pybind11`. - The `$(python3 -m pybind11 --includes)` part is necessary to include the correct header files for `pybind11`. - The `$(python3-config --extension-suffix)` part is necessary to create the correct file extension for the shared library. For more information, see the [pybind11 documentation](https://pybind11.readthedocs.io/en/stable/compiling.html#building-manually). +- If you have multiple versions of Python installed, you might have to replace `python3-config` with `python3.8-config` or similar. @@ -39,4 +40,8 @@ python cpp-dummy/run_micro_manager.py ``` When changing the C++ solverdummy to your own solver, make sure to change the `PYBIND11_MODULE` in `micro_cpp_dummy.cpp` to the name that you want to compile to. -For example, if you want to import the module as `my_solver`, change the line to `PYBIND11_MODULE(my_solver, m) {`. Then, change the `micro_file_name` in `micro-manager-config.json` to `my_solver`. \ No newline at end of file +For example, if you want to import the module as `my_solver`, change the line to `PYBIND11_MODULE(my_solver, m) {`. Then, change the `micro_file_name` in `micro-manager-config.json` to `my_solver`. + +### Adaptivity + +For the case of adaptivity, the deepcopy function also has to be implemented for the C++ class. An example is provided in the `cpp-dummy` directory. 
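Why the `__deepcopy__` binding added for the C++ dummy matters: when the Manager duplicates an active simulation onto an inactive one it calls Python's `copy.deepcopy`, which delegates to a `__deepcopy__(self, memo)` method if the object defines one. The plain-Python stand-in below is only meant to illustrate that dispatch; it is not the pybind11 binding itself, and the member names mirror the C++ dummy.

```python
# Plain-Python illustration of the deepcopy dispatch; FakeMicroSimulation is
# a hypothetical stand-in for the pybind11-wrapped C++ class.
from copy import deepcopy


class FakeMicroSimulation:
    def __init__(self, sim_id):
        self._sim_id = sim_id
        self._micro_scalar_data = 0.0

    def __deepcopy__(self, memo):
        # Mirrors the C++ __deepcopy__ of this patch: build a fresh object
        # and copy the members over explicitly.
        new_sim = FakeMicroSimulation(self._sim_id)
        new_sim._micro_scalar_data = self._micro_scalar_data
        return new_sim


active = FakeMicroSimulation(0)
active._micro_scalar_data = 42.0
clone = deepcopy(active)         # copy.deepcopy dispatches to __deepcopy__
print(clone._micro_scalar_data)  # 42.0
print(clone is active)           # False: a separate object with copied state
```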
From ce5fed4a5448d8f3f629e9f27e6f0a0b1edf6aef Mon Sep 17 00:00:00 2001 From: Erik Scheurer Date: Fri, 14 Apr 2023 10:46:14 +0200 Subject: [PATCH 23/87] Add parser argument to `run_micro_manager` --- .github/workflows/run-macro-micro-dummy.yml | 8 ++++---- examples/cpp-dummy/run_micro_manager.py | 7 ++++++- examples/cpp-dummy/run_micro_manager_adaptivity.py | 11 ----------- examples/python-dummy/run_micro_manager.py | 7 ++++++- examples/python-dummy/run_micro_manager_adaptivity.py | 11 ----------- 5 files changed, 16 insertions(+), 28 deletions(-) delete mode 100644 examples/cpp-dummy/run_micro_manager_adaptivity.py delete mode 100644 examples/python-dummy/run_micro_manager_adaptivity.py diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index 5d17abe6..a1c72f62 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -30,13 +30,13 @@ jobs: timeout-minutes: 3 run: | cd examples/ - python3 python-dummy/run_micro_manager.py & python3 macro_dummy.py + python3 python-dummy/run_micro_manager.py --config micro-manager-config.json & python3 macro_dummy.py - name: Run adaptive python macro-micro dummy timeout-minutes: 3 run: | cd examples/ - python3 python-dummy/run_micro_manager_adaptivity.py & python3 macro_dummy.py + python3 python-dummy/run_micro_manager_adaptivity.py --config micro-manager-adaptivity-config.json & python3 macro_dummy.py - name: Run c++ macro-micro dummy timeout-minutes: 3 @@ -45,10 +45,10 @@ jobs: pip install pybind11 c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) cd ../ - python3 cpp-dummy/run_micro_manager.py & python3 macro_dummy.py + python3 cpp-dummy/run_micro_manager.py --config micro-manager-config.json & python3 macro_dummy.py - name: Run adaptive c++ macro-micro dummy timeout-minutes: 3 run: | cd examples/ - python3 cpp-dummy/run_micro_manager_adaptivity.py & python3 macro_dummy.py + python3 cpp-dummy/run_micro_manager_adaptivity.py --config micro-manager-adaptivity-config.json & python3 macro_dummy.py diff --git a/examples/cpp-dummy/run_micro_manager.py b/examples/cpp-dummy/run_micro_manager.py index eee9dbe9..a3d0dd6c 100644 --- a/examples/cpp-dummy/run_micro_manager.py +++ b/examples/cpp-dummy/run_micro_manager.py @@ -3,8 +3,13 @@ """ from micro_manager import MicroManager +from argparse import ArgumentParser -manager = MicroManager("../micro-manager-config.json") +parser = ArgumentParser() +parser.add_argument("--config", help="Path to the micro manager configuration file") +args = parser.parse_args() + +manager = MicroManager(args.config) manager.initialize() diff --git a/examples/cpp-dummy/run_micro_manager_adaptivity.py b/examples/cpp-dummy/run_micro_manager_adaptivity.py deleted file mode 100644 index a26cb63e..00000000 --- a/examples/cpp-dummy/run_micro_manager_adaptivity.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -Script to run the Micro Manager -""" - -from micro_manager import MicroManager - -manager = MicroManager("../micro-manager-adaptivity-config.json") - -manager.initialize() - -manager.solve() diff --git a/examples/python-dummy/run_micro_manager.py b/examples/python-dummy/run_micro_manager.py index eee9dbe9..a3d0dd6c 100644 --- a/examples/python-dummy/run_micro_manager.py +++ b/examples/python-dummy/run_micro_manager.py @@ -3,8 +3,13 @@ """ from micro_manager import MicroManager +from argparse import ArgumentParser -manager = 
MicroManager("../micro-manager-config.json") +parser = ArgumentParser() +parser.add_argument("--config", help="Path to the micro manager configuration file") +args = parser.parse_args() + +manager = MicroManager(args.config) manager.initialize() diff --git a/examples/python-dummy/run_micro_manager_adaptivity.py b/examples/python-dummy/run_micro_manager_adaptivity.py deleted file mode 100644 index a26cb63e..00000000 --- a/examples/python-dummy/run_micro_manager_adaptivity.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -Script to run the Micro Manager -""" - -from micro_manager import MicroManager - -manager = MicroManager("../micro-manager-adaptivity-config.json") - -manager.initialize() - -manager.solve() From 8968f01316f7fe75d27e6331688fc783ce1a7e04 Mon Sep 17 00:00:00 2001 From: Erik Scheurer Date: Fri, 14 Apr 2023 15:17:05 +0200 Subject: [PATCH 24/87] Correct path and make config file mandatory --- .github/workflows/run-macro-micro-dummy.yml | 8 ++++---- examples/cpp-dummy/run_micro_manager.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index a1c72f62..ceab43cf 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -30,13 +30,13 @@ jobs: timeout-minutes: 3 run: | cd examples/ - python3 python-dummy/run_micro_manager.py --config micro-manager-config.json & python3 macro_dummy.py + python3 python-dummy/run_micro_manager.py --config ../micro-manager-config.json & python3 macro_dummy.py - name: Run adaptive python macro-micro dummy timeout-minutes: 3 run: | cd examples/ - python3 python-dummy/run_micro_manager_adaptivity.py --config micro-manager-adaptivity-config.json & python3 macro_dummy.py + python3 python-dummy/run_micro_manager_adaptivity.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py - name: Run c++ macro-micro dummy timeout-minutes: 3 @@ -45,10 +45,10 @@ jobs: pip install pybind11 c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) cd ../ - python3 cpp-dummy/run_micro_manager.py --config micro-manager-config.json & python3 macro_dummy.py + python3 cpp-dummy/run_micro_manager.py --config ../micro-manager-config.json & python3 macro_dummy.py - name: Run adaptive c++ macro-micro dummy timeout-minutes: 3 run: | cd examples/ - python3 cpp-dummy/run_micro_manager_adaptivity.py --config micro-manager-adaptivity-config.json & python3 macro_dummy.py + python3 cpp-dummy/run_micro_manager_adaptivity.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py diff --git a/examples/cpp-dummy/run_micro_manager.py b/examples/cpp-dummy/run_micro_manager.py index a3d0dd6c..e6afd394 100644 --- a/examples/cpp-dummy/run_micro_manager.py +++ b/examples/cpp-dummy/run_micro_manager.py @@ -6,7 +6,7 @@ from argparse import ArgumentParser parser = ArgumentParser() -parser.add_argument("--config", help="Path to the micro manager configuration file") +parser.add_argument("--config", required=True, help="Path to the micro manager configuration file") args = parser.parse_args() manager = MicroManager(args.config) From e76ff83da955eaeb22a239de6b81acab37d46049 Mon Sep 17 00:00:00 2001 From: Erik Scheurer Date: Fri, 14 Apr 2023 15:47:23 +0200 Subject: [PATCH 25/87] Correct calling python file --- .github/workflows/run-macro-micro-dummy.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index ceab43cf..72b152d7 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -36,7 +36,7 @@ jobs: timeout-minutes: 3 run: | cd examples/ - python3 python-dummy/run_micro_manager_adaptivity.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py + python3 python-dummy/run_micro_manager.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py - name: Run c++ macro-micro dummy timeout-minutes: 3 @@ -51,4 +51,4 @@ jobs: timeout-minutes: 3 run: | cd examples/ - python3 cpp-dummy/run_micro_manager_adaptivity.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py + python3 cpp-dummy/run_micro_manager.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py From 9d856210dc3f6910ed3a345d74a3fb220e0ebd60 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Sun, 16 Apr 2023 10:22:25 +0200 Subject: [PATCH 26/87] Set local and global ID of copied active simulation to the original IDs of the previously inactive simulation (#34) --- micro_manager/adaptivity.py | 6 ++++++ micro_manager/micro_manager.py | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index 5152bbd0..0e0d7393 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -161,11 +161,17 @@ def update_inactive_micro_sims( if self._check_for_activation(i, similarity_dists, _micro_sim_states): associated_active_id = micro_sims[i].get_associated_active_id() + # Get local and global ID of inactive simulation, to set it to the copied simulation later + local_id = micro_sims[i].get_local_id() + global_id = micro_sims[i].get_global_id() + # Effectively kill the micro sim object associated to the inactive ID micro_sims[i] = None # Make a copy of the associated active micro sim object micro_sims[i] = deepcopy(micro_sims[associated_active_id]) + micro_sims[i].set_local_id(local_id) + micro_sims[i].set_global_id(global_id) _micro_sim_states[i] = 1 return _micro_sim_states diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index d88c6cb9..70b5ad93 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -47,6 +47,12 @@ def get_local_id(self): def get_global_id(self): return self._global_id + def set_local_id(self, local_id): + self._local_id = local_id + + def set_global_id(self, global_id): + self._global_id = global_id + def activate(self): self._is_active = True From 860d8b4337d5c8b8c420f814826af383b945187d Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Mon, 17 Apr 2023 09:55:50 +0200 Subject: [PATCH 27/87] Delete cpp-dummy compiled file and micro-manager.log in the cleaning script --- examples/clean-example.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/clean-example.sh b/examples/clean-example.sh index 44fea7d9..aa44870d 100755 --- a/examples/clean-example.sh +++ b/examples/clean-example.sh @@ -1,3 +1,5 @@ rm -fv *.log rm -r -fv precice-run/ rm -fv *-events.json +rm -fv cpp-dummy/micro-manager.log +rm -fv cpp-dummy/micro_dummy.cpython-310-x86_64-linux-gnu.so From ffa869fa0a301c101234a3ed666eb714941d79bb Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Mon, 17 Apr 2023 10:15:04 +0200 Subject: [PATCH 28/87] Add pickling support to C++ solver dummy (#30) * [WIP] Add pickling support to C++ solver dummy * Working example of pickling and unpickling of C++ solver dummy * Use 
py::tuple instead of std::tuple in examples/cpp-dummy/micro_cpp_dummy.cpp Co-authored-by: erikscheurer <84399192+erikscheurer@users.noreply.github.com> * Use py::tuple instead of std::tuple in cpp solverdummy * Correcting wrongly removed code in cpp solver dummy * Remove template call to py::tuple * Remove unnecessary py::make_tuple call --------- Co-authored-by: erikscheurer <84399192+erikscheurer@users.noreply.github.com> --- examples/cpp-dummy/micro_cpp_dummy.cpp | 32 +++++++++++++++++++++++++- examples/cpp-dummy/micro_cpp_dummy.hpp | 3 +++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/examples/cpp-dummy/micro_cpp_dummy.cpp b/examples/cpp-dummy/micro_cpp_dummy.cpp index a2335cae..b00cf281 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.cpp +++ b/examples/cpp-dummy/micro_cpp_dummy.cpp @@ -69,6 +69,19 @@ void MicroSimulation::reload_checkpoint() _micro_scalar_data = _checkpoint; } +// This function needs to set the complete state of a micro simulation +void MicroSimulation::setState(double micro_scalar_data, double checkpoint) +{ + _micro_scalar_data = micro_scalar_data; + _checkpoint = checkpoint; +} + +// This function needs to return variables which can fully define the state of a micro simulation +py::tuple MicroSimulation::getState() const +{ + return py::make_tuple(_sim_id, _micro_scalar_data, _checkpoint); +} + PYBIND11_MODULE(micro_dummy, m) { // optional docstring m.doc() = "pybind11 micro dummy plugin"; @@ -78,5 +91,22 @@ PYBIND11_MODULE(micro_dummy, m) { .def("initialize", &MicroSimulation::initialize) .def("solve", &MicroSimulation::solve) .def("save_checkpoint", &MicroSimulation::save_checkpoint) - .def("reload_checkpoint", &MicroSimulation::reload_checkpoint); + .def("reload_checkpoint", &MicroSimulation::reload_checkpoint) + .def(py::pickle( + [](const MicroSimulation &ms) { // __getstate__ + /* Return a tuple that fully encodes the state of the object */ + return ms.getState(); + }, + [](py::tuple t) { // __setstate__ + if (t.size() != 3) + throw std::runtime_error("Invalid state!"); + + /* Create a new C++ instance */ + MicroSimulation ms(t[0].cast()); + + ms.setState(t[1].cast(), t[2].cast()); + + return ms; + } + )); } diff --git a/examples/cpp-dummy/micro_cpp_dummy.hpp b/examples/cpp-dummy/micro_cpp_dummy.hpp index 1a767adc..7c45d5ef 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.hpp +++ b/examples/cpp-dummy/micro_cpp_dummy.hpp @@ -21,6 +21,9 @@ class MicroSimulation void save_checkpoint(); void reload_checkpoint(); + void setState(double micro_scalar_data, double checkpoint); + py::tuple getState() const; + private: int _sim_id; double _micro_scalar_data; From cf9fee574a9541558cec5b46020e9aa4e0783cbe Mon Sep 17 00:00:00 2001 From: Erik Scheurer Date: Mon, 17 Apr 2023 18:17:47 +0200 Subject: [PATCH 29/87] Add option for get and set state --- examples/cpp-dummy/micro_cpp_dummy.cpp | 19 +++++-------------- micro_manager/adaptivity.py | 16 +++++++++------- 2 files changed, 14 insertions(+), 21 deletions(-) diff --git a/examples/cpp-dummy/micro_cpp_dummy.cpp b/examples/cpp-dummy/micro_cpp_dummy.cpp index 3cd039c0..f595af5f 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.cpp +++ b/examples/cpp-dummy/micro_cpp_dummy.cpp @@ -69,16 +69,6 @@ void MicroSimulation::reload_checkpoint() _micro_scalar_data = _checkpoint; } -// For adaptivity only: Need to be able to deepcopy the object -MicroSimulation MicroSimulation::__deepcopy__(py::dict memo) -{ - MicroSimulation new_sim(_sim_id); - new_sim._micro_scalar_data = _micro_scalar_data; - new_sim._micro_vector_data = 
_micro_vector_data; - new_sim._checkpoint = _checkpoint; - return new_sim; -} - // This function needs to set the complete state of a micro simulation void MicroSimulation::setState(double micro_scalar_data, double checkpoint) { @@ -89,7 +79,7 @@ void MicroSimulation::setState(double micro_scalar_data, double checkpoint) // This function needs to return variables which can fully define the state of a micro simulation py::tuple MicroSimulation::getState() const { - return py::make_tuple(_sim_id, _micro_scalar_data, _checkpoint); + return py::make_tuple(_micro_scalar_data, _checkpoint); } PYBIND11_MODULE(micro_dummy, m) { @@ -102,20 +92,21 @@ PYBIND11_MODULE(micro_dummy, m) { .def("solve", &MicroSimulation::solve) .def("save_checkpoint", &MicroSimulation::save_checkpoint) .def("reload_checkpoint", &MicroSimulation::reload_checkpoint) - .def("__deepcopy__", &MicroSimulation::__deepcopy__) + .def("get_state", &MicroSimulation::getState) + .def("set_state", &MicroSimulation::setState) .def(py::pickle( [](const MicroSimulation &ms) { // __getstate__ /* Return a tuple that fully encodes the state of the object */ return ms.getState(); }, [](py::tuple t) { // __setstate__ - if (t.size() != 3) + if (t.size() != 2) throw std::runtime_error("Invalid state!"); /* Create a new C++ instance */ MicroSimulation ms(t[0].cast()); - ms.setState(t[1].cast(), t[2].cast()); + ms.setState(t[0].cast(), t[1].cast()); return ms; } diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index 0e0d7393..3e76a3d9 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -165,13 +165,15 @@ def update_inactive_micro_sims( local_id = micro_sims[i].get_local_id() global_id = micro_sims[i].get_global_id() - # Effectively kill the micro sim object associated to the inactive ID - micro_sims[i] = None - - # Make a copy of the associated active micro sim object - micro_sims[i] = deepcopy(micro_sims[associated_active_id]) - micro_sims[i].set_local_id(local_id) - micro_sims[i].set_global_id(global_id) + # Copy state from associated active simulation with __getstate__ and __setstate__ if available else deepcopy + if hasattr(micro_sims[associated_active_id], 'get_state') and \ + hasattr(micro_sims[associated_active_id], 'set_state'): + micro_sims[i].set_state(*micro_sims[associated_active_id].get_state()) + else: + micro_sims[i] = None + micro_sims[i] = deepcopy(micro_sims[associated_active_id]) + micro_sims[i].set_local_id(local_id) + micro_sims[i].set_global_id(global_id) _micro_sim_states[i] = 1 return _micro_sim_states From 2839297e95dd7aee8ca58066d2877ab35724805a Mon Sep 17 00:00:00 2001 From: Erik Scheurer Date: Mon, 17 Apr 2023 18:31:29 +0200 Subject: [PATCH 30/87] Format --- micro_manager/adaptivity.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity.py index 3e76a3d9..37feb9d2 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity.py @@ -165,7 +165,8 @@ def update_inactive_micro_sims( local_id = micro_sims[i].get_local_id() global_id = micro_sims[i].get_global_id() - # Copy state from associated active simulation with __getstate__ and __setstate__ if available else deepcopy + # Copy state from associated active simulation with get_state and + # set_state if available else deepcopy if hasattr(micro_sims[associated_active_id], 'get_state') and \ hasattr(micro_sims[associated_active_id], 'set_state'): micro_sims[i].set_state(*micro_sims[associated_active_id].get_state()) From 
d110b55724b0229c5f9d29ef9748259ddedcf034 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Mon, 17 Apr 2023 20:29:47 +0200 Subject: [PATCH 31/87] Rectify check in setstate function --- examples/cpp-dummy/micro_cpp_dummy.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/cpp-dummy/micro_cpp_dummy.cpp b/examples/cpp-dummy/micro_cpp_dummy.cpp index b00cf281..0451b5cc 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.cpp +++ b/examples/cpp-dummy/micro_cpp_dummy.cpp @@ -98,7 +98,7 @@ PYBIND11_MODULE(micro_dummy, m) { return ms.getState(); }, [](py::tuple t) { // __setstate__ - if (t.size() != 3) + if (t.size() != 2) throw std::runtime_error("Invalid state!"); /* Create a new C++ instance */ From 0a9022d89ff2a2b2710bdabcafb22b53ddf83f2c Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 18 Apr 2023 10:23:52 +0200 Subject: [PATCH 32/87] Micro Manager no longer passes local ID to micro simulation during object creation (#35) * Micro Manager no longer passes local ID to micro simulation * Move _sim_id from cpp_dummy * Corrections in pickling part of cpp-dummy --- README.md | 2 +- examples/cpp-dummy/micro_cpp_dummy.cpp | 18 +++++++++--------- examples/cpp-dummy/micro_cpp_dummy.hpp | 3 +-- examples/python-dummy/micro_dummy.py | 11 +++++------ micro_manager/micro_manager.py | 2 +- .../test_adaptivity/micro_solver.py | 3 +-- 6 files changed, 18 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 56477382..5b7848db 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ The Micro Manager facilitates two-scale coupling between one macro-scale simulat ### Steps to convert micro simulation code to a callable library -* Create a class called `MicroSimulation`. It is good practice to define class member variables in the class constructor `__init__`. The constructor of the class `MicroSimulation` gets an integer value from the Micro Manager which is the ID of the micro simulation from the perspective of the Micro Manager. This ID can be used later on for writing output. +* Create a class called `MicroSimulation`. It is good practice to define class member variables in the class constructor `__init__`. This constructor does not get any input. * **Optional**: Define a function `initialize` which computes the initial state of the micro simulation and returns initial values, which need to be transferred to the macro simulation. The return value needs to be a Python dictionary with the names of the quantities as keys and the values of the quantities as the dictionary values. * Create a function `solve`, which consists of all solving steps of one time step of a micro simulation or, if the micro problem is a steady-state simulation, all solving steps until the steady state is reached. `solve` should take a Python dictionary as an input, which has the name of the input data as keys and the corresponding data values as values. The `solve` function should return the quantities that need to be communicated to the macro-side. The return entity needs to again be a Python dictionary with the names of the quantities as keys and the values of the quantities as values. * If implicit coupling is required between the macro and all micro problems, then you can additionally define two functions `save_checkpoint` and `revert_to_checkpoint`. 
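The README hunk above changes the contract of the user-side constructor: since the Manager now owns the simulation IDs, `MicroSimulation.__init__` takes no arguments. The sketch below condenses the wrapper pattern from `micro_manager.py` (the full `MicroProblem` class has more methods) together with a minimal user class, just to show how the two fit together after this change; it is a simplified illustration, not the complete implementation.

```python
# Condensed sketch, assuming the simplified interface described in the README.
def create_micro_problem_class(base_micro_simulation):
    # Subclass the user's MicroSimulation at runtime so that IDs and the
    # active/inactive flag live in the wrapper, not in the user's class.
    class MicroProblem(base_micro_simulation):
        def __init__(self, local_id, global_id):
            base_micro_simulation.__init__(self)  # user constructor: no arguments
            self._local_id = local_id
            self._global_id = global_id
            self._is_active = False  # simulations are created inactive

        def get_global_id(self):
            return self._global_id

    return MicroProblem


class MicroSimulation:
    """Minimal user class following the interface described above."""

    def __init__(self):
        self._micro_scalar_data = 0.0

    def solve(self, macro_data, dt):
        self._micro_scalar_data = macro_data["macro-scalar-data"] + 1
        return {"micro-scalar-data": self._micro_scalar_data}


WrappedSimulation = create_micro_problem_class(MicroSimulation)
sim = WrappedSimulation(local_id=0, global_id=0)
print(sim.solve({"macro-scalar-data": 1.0}, dt=0.1))  # {'micro-scalar-data': 2.0}
print(sim.get_global_id())                            # 0
```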
diff --git a/examples/cpp-dummy/micro_cpp_dummy.cpp b/examples/cpp-dummy/micro_cpp_dummy.cpp index 0451b5cc..0eee76bc 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.cpp +++ b/examples/cpp-dummy/micro_cpp_dummy.cpp @@ -13,12 +13,12 @@ #include "micro_cpp_dummy.hpp" // Constructor -MicroSimulation::MicroSimulation(int sim_id) : _sim_id(sim_id), _micro_scalar_data(0), _checkpoint(0) {} +MicroSimulation::MicroSimulation() : _micro_scalar_data(0), _checkpoint(0) {} // Initialize void MicroSimulation::initialize() { - std::cout << "Initialize micro problem (" << _sim_id << ")\n"; + std::cout << "Initialize micro problem\n"; _micro_scalar_data = 0; _micro_vector_data.clear(); _checkpoint = 0; @@ -27,7 +27,7 @@ void MicroSimulation::initialize() // Solve py::dict MicroSimulation::solve(py::dict macro_data, double dt) { - std::cout << "Solve timestep of micro problem (" << _sim_id << ")\n"; + std::cout << "Solve timestep of micro problem\n"; //! Code below shows how to convert input macro data and use it in your C++ solver @@ -58,14 +58,14 @@ py::dict MicroSimulation::solve(py::dict macro_data, double dt) // Save Checkpoint -- only valid for implicit coupling void MicroSimulation::save_checkpoint() { - std::cout << "Saving state of micro problem (" << _sim_id << ")\n"; + std::cout << "Saving state of micro problem\n"; _checkpoint = _micro_scalar_data; } // Reload Checkpoint -- only valid for implicit coupling void MicroSimulation::reload_checkpoint() { - std::cout << "Reverting to old state of micro problem (" << _sim_id << ")\n"; + std::cout << "Reverting to old state of micro problem\n"; _micro_scalar_data = _checkpoint; } @@ -79,7 +79,7 @@ void MicroSimulation::setState(double micro_scalar_data, double checkpoint) // This function needs to return variables which can fully define the state of a micro simulation py::tuple MicroSimulation::getState() const { - return py::make_tuple(_sim_id, _micro_scalar_data, _checkpoint); + return py::make_tuple(_micro_scalar_data, _checkpoint); } PYBIND11_MODULE(micro_dummy, m) { @@ -87,7 +87,7 @@ PYBIND11_MODULE(micro_dummy, m) { m.doc() = "pybind11 micro dummy plugin"; py::class_(m, "MicroSimulation") - .def(py::init()) + .def(py::init()) .def("initialize", &MicroSimulation::initialize) .def("solve", &MicroSimulation::solve) .def("save_checkpoint", &MicroSimulation::save_checkpoint) @@ -102,9 +102,9 @@ PYBIND11_MODULE(micro_dummy, m) { throw std::runtime_error("Invalid state!"); /* Create a new C++ instance */ - MicroSimulation ms(t[0].cast()); + MicroSimulation ms; - ms.setState(t[1].cast(), t[2].cast()); + ms.setState(t[0].cast(), t[1].cast()); return ms; } diff --git a/examples/cpp-dummy/micro_cpp_dummy.hpp b/examples/cpp-dummy/micro_cpp_dummy.hpp index 7c45d5ef..6a865fa8 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.hpp +++ b/examples/cpp-dummy/micro_cpp_dummy.hpp @@ -14,7 +14,7 @@ namespace py = pybind11; class MicroSimulation { public: - MicroSimulation(int sim_id); + MicroSimulation(); void initialize(); // solve takes a python dict data, and the timestep dt as inputs, and returns a python dict py::dict solve(py::dict macro_write_data, double dt); @@ -25,7 +25,6 @@ class MicroSimulation py::tuple getState() const; private: - int _sim_id; double _micro_scalar_data; std::vector _micro_vector_data; double _checkpoint; diff --git a/examples/python-dummy/micro_dummy.py b/examples/python-dummy/micro_dummy.py index f3f76270..e1a2465e 100644 --- a/examples/python-dummy/micro_dummy.py +++ b/examples/python-dummy/micro_dummy.py @@ -6,24 +6,23 @@ class 
MicroSimulation: - def __init__(self, sim_id): + def __init__(self): """ Constructor of MicroSimulation class. """ - self._sim_id = sim_id self._dims = 3 self._micro_scalar_data = None self._micro_vector_data = None self._checkpoint = None def initialize(self): - print("Initialize micro problem ({})".format(self._sim_id)) + print("Initialize micro problem") self._micro_scalar_data = 0 self._micro_vector_data = [] self._checkpoint = 0 def solve(self, macro_data, dt): - print("Solve timestep of micro problem ({})".format(self._sim_id)) + print("Solve timestep of micro problem") assert dt != 0 self._micro_vector_data = [] self._micro_scalar_data = macro_data["macro-scalar-data"] + 1 @@ -34,9 +33,9 @@ def solve(self, macro_data, dt): "micro-vector-data": self._micro_vector_data.copy()} def save_checkpoint(self): - print("Saving state of micro problem ({})".format(self._sim_id)) + print("Saving state of micro problem") self._checkpoint = self._micro_scalar_data def reload_checkpoint(self): - print("Reverting to old state of micro problem ({})".format(self._sim_id)) + print("Reverting to old state of micro problem") self._micro_scalar_data = self._checkpoint diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 70b5ad93..981bb395 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -35,7 +35,7 @@ def create_micro_problem_class(base_micro_simulation): """ class MicroProblem(base_micro_simulation): def __init__(self, local_id, global_id): - base_micro_simulation.__init__(self, local_id) + base_micro_simulation.__init__(self) self._local_id = local_id self._global_id = global_id self._is_active = False # Simulation is created in an inactive state diff --git a/tests/integration/test_adaptivity/micro_solver.py b/tests/integration/test_adaptivity/micro_solver.py index ad9640ab..90807589 100644 --- a/tests/integration/test_adaptivity/micro_solver.py +++ b/tests/integration/test_adaptivity/micro_solver.py @@ -6,11 +6,10 @@ class MicroSimulation: - def __init__(self, sim_id): + def __init__(self): """ Constructor of MicroSimulation class. """ - self._sim_id = sim_id self._micro_scalar_data = None self._micro_vector_data = None self._checkpoint = None From 213fad84f4feddff0563ed308d284d3ec994a26a Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Mon, 15 May 2023 13:08:17 +0200 Subject: [PATCH 33/87] Refactor adaptivity functionality into a separate class structure (#38) * Make a copy of the active sim object which is associated to an inactive sim and put it in the list * Copy list of micro simulations in the association step and return the copied instance * Reduce total time of adaptivity integration test from 10 to 2 * Properly handling copying of active micro sim objects to the associated inactive ones * [WIP] Add option to toggle local or global adaptivity * [WIP] Implement local and global adaptivity in the initialization * [WIP] Handle global adaptivity for data reading * [WIP] Redesigning of the manager for global adaptivity * [WIP] Update micro sim states globally before working on micro sim objects locally * [WIP] Moving functionality to create MicroSimulation class outside of micro_manager.py * [WIP] Split adaptivity into two inherited classes for global and local functionality. 
(only skeleton code) * [WIP] Further refactoring of adaptivity code * [WIP] Streamlining * [WIP] Adding asnychronous receiving for the activation step * Add set methods to MicroSimulation class * Moving hashing in p2p operations to a separate function * Refactoring adaptivity * Formatting * Modify class call in unit tests * Major portion of refactoring, including refactoring of tests * Get the solver dummy to work with the new option * Formatting --- examples/micro-manager-adaptivity-config.json | 3 +- micro_manager/adaptivity/__init__.py | 0 micro_manager/adaptivity/adaptivity.py | 109 +++++++++ .../local_adaptivity.py} | 139 ++--------- micro_manager/config.py | 21 +- micro_manager/micro_manager.py | 226 ++++++++---------- micro_manager/micro_simulation.py | 106 ++++++++ .../test_adaptivity/micro-manager-config.json | 1 + tests/unit/test_adaptivity.py | 43 ++-- tests/unit/test_adaptivity_config.json | 1 + 10 files changed, 383 insertions(+), 266 deletions(-) create mode 100644 micro_manager/adaptivity/__init__.py create mode 100644 micro_manager/adaptivity/adaptivity.py rename micro_manager/{adaptivity.py => adaptivity/local_adaptivity.py} (52%) create mode 100644 micro_manager/micro_simulation.py diff --git a/examples/micro-manager-adaptivity-config.json b/examples/micro-manager-adaptivity-config.json index eb829ea1..68de39fc 100644 --- a/examples/micro-manager-adaptivity-config.json +++ b/examples/micro-manager-adaptivity-config.json @@ -8,7 +8,8 @@ }, "simulation_params": { "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0], - "adaptivity": "True", + "adaptivity": "True", + "adaptivity_type": "local", "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], "adaptivity_history_param": 0.5, "adaptivity_coarsening_constant": 0.3, diff --git a/micro_manager/adaptivity/__init__.py b/micro_manager/adaptivity/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/micro_manager/adaptivity/adaptivity.py b/micro_manager/adaptivity/adaptivity.py new file mode 100644 index 00000000..fbaa46cd --- /dev/null +++ b/micro_manager/adaptivity/adaptivity.py @@ -0,0 +1,109 @@ +""" +Functionality for adaptive initialization and control of micro simulations +""" +import numpy as np + + +class AdaptivityCalculator: + def __init__(self, configurator, global_ids) -> None: + # Names of data to be used for adaptivity computation + self._refine_const = configurator.get_adaptivity_refining_const() + self._coarse_const = configurator.get_adaptivity_coarsening_const() + self._adaptivity_type = configurator.get_adaptivity_type() + self._coarse_tol = 0.0 + self._ref_tol = 0.0 + # Use set to make the "in" functionality faster for large lists + self._global_ids_of_local_sims = global_ids + + def get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: np.ndarray) -> np.ndarray: + """ + Calculate metric which determines if two micro simulations are similar enough to have one of them deactivated. 
+ + Parameters + ---------- + dt : float + Time step + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + data : numpy array + Data to be used in similarity distance calculation + + Returns + ------- + similarity_dists : numpy array + Updated 2D array having similarity distances between each micro simulation pair + """ + _similarity_dists = np.copy(similarity_dists) + + if data.ndim == 1: + dim = 0 + elif data.ndim == 2: + _, dim = data.shape + + number_of_sims, _ = _similarity_dists.shape + + for counter_1, id_1 in enumerate(range(number_of_sims)): + for counter_2, id_2 in enumerate(range(number_of_sims)): + data_diff = 0 + if id_1 != id_2: + if dim: + for d in range(dim): + data_diff += abs(data[counter_1, d] - data[counter_2, d]) + else: + data_diff = abs(data[counter_1] - data[counter_2]) + + _similarity_dists[id_1, id_2] += dt * data_diff + else: + _similarity_dists[id_1, id_2] = 0 + + return _similarity_dists + + def _check_for_activation( + self, + inactive_id: int, + similarity_dists: np.ndarray, + micro_sim_states: np.ndarray) -> bool: + """ + Function to check if an inactive simulation needs to be activated + + Parameters + ---------- + inactive_id : int + ID of inactive simulation which is checked for activation + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + micro_sim_states : numpy array + 1D array having state (active or inactive) of each micro simulation + """ + active_sim_ids = np.where(micro_sim_states == 1)[0] + + dists = similarity_dists[inactive_id, active_sim_ids] + + # If inactive sim is not similar to any active sim, activate it + return min(dists) > self._ref_tol + + def _check_for_deactivation( + self, + active_id: int, + similarity_dists: np.ndarray, + micro_sim_states: np.ndarray) -> bool: + """ + Function to check if an active simulation needs to be deactivated + + Parameters + ---------- + active_id : int + ID of active simulation which is checked for deactivation + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + micro_sim_states : numpy array + 1D array having state (active or inactive) of each micro simulation + """ + active_sim_ids = np.where(micro_sim_states == 1)[0] + + for active_id_2 in active_sim_ids: + if active_id != active_id_2: # don't compare active sim to itself + # If active sim is similar to another active sim, deactivate it + if similarity_dists[active_id, active_id_2] < self._coarse_tol: + return True + return False diff --git a/micro_manager/adaptivity.py b/micro_manager/adaptivity/local_adaptivity.py similarity index 52% rename from micro_manager/adaptivity.py rename to micro_manager/adaptivity/local_adaptivity.py index 37feb9d2..551da16d 100644 --- a/micro_manager/adaptivity.py +++ b/micro_manager/adaptivity/local_adaptivity.py @@ -1,70 +1,16 @@ """ -Functionality for adaptive initialization and control of micro simulations +Functionality for adaptive initialization and control of micro simulations locally within a rank (or the entire domain if the Micro Manager is run in serial) """ -import numpy as np import sys +import numpy as np from copy import deepcopy +from .adaptivity import AdaptivityCalculator -class AdaptiveController: - def __init__(self, configurator) -> None: - # Names of data to be used for adaptivity computation - self._refine_const = configurator.get_adaptivity_refining_const() - self._coarse_const = configurator.get_adaptivity_coarsening_const() - 
self._number_of_sims = 0 - self._coarse_tol = 0.0 - - def set_number_of_sims(self, number_of_sims: int) -> None: - """ - Setting number of simulations for the AdaptiveController object. - - Parameters - ---------- - number_of_sims : int - Number of micro simulations - """ - self._number_of_sims = number_of_sims - - def get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: np.ndarray) -> np.ndarray: - """ - Calculate metric which determines if two micro simulations are similar enough to have one of them deactivated. - - Parameters - ---------- - dt : float - Timestep - similarity_dists : numpy array - 2D array having similarity distances between each micro simulation pair - data : numpy array - Data to be used in similarity distance calculation - - Returns - ------- - similarity_dists : numpy array - Updated 2D array having similarity distances between each micro simulation pair - """ - _similarity_dists = np.copy(similarity_dists) - - if data.ndim == 1: - dim = 0 - elif data.ndim == 2: - _, dim = data.shape - - for counter_1, id_1 in enumerate(range(self._number_of_sims)): - for counter_2, id_2 in enumerate(range(self._number_of_sims)): - data_diff = 0 - if id_1 != id_2: - if dim: - for d in range(dim): - data_diff += abs(data[counter_1, d] - data[counter_2, d]) - else: - data_diff = abs(data[counter_1] - data[counter_2]) - - _similarity_dists[id_1, id_2] += dt * data_diff - else: - _similarity_dists[id_1, id_2] = 0 - - return _similarity_dists +class LocalAdaptivityCalculator(AdaptivityCalculator): + def __init__(self, configurator, global_ids, number_of_local_sims) -> None: + super().__init__(configurator, global_ids) + self._number_of_local_sims = number_of_local_sims def update_active_micro_sims( self, @@ -74,7 +20,6 @@ def update_active_micro_sims( """ Update set of active micro simulations. Active micro simulations are compared to each other and if found similar, one of them is deactivated. 
- Parameters ---------- similarity_dists : numpy array @@ -83,7 +28,6 @@ def update_active_micro_sims( 1D array having state (active or inactive) of each micro simulation micro_sims : list List of objects of class MicroProblem, which are the micro simulations - Returns ------- _micro_sim_states : numpy array @@ -94,7 +38,7 @@ def update_active_micro_sims( _micro_sim_states = np.copy(micro_sim_states) # Input micro_sim_states is not longer used after this point # Update the set of active micro sims - for i in range(self._number_of_sims): + for i in range(self._number_of_local_sims): if _micro_sim_states[i]: # if sim is active if self._check_for_deactivation(i, similarity_dists, _micro_sim_states): micro_sims[i].deactivate() @@ -102,32 +46,6 @@ def update_active_micro_sims( return _micro_sim_states - def _check_for_deactivation( - self, - active_id: int, - similarity_dists: np.ndarray, - micro_sim_states: np.ndarray) -> bool: - """ - Function to check if an active simulation needs to be deactivated - - Parameters - ---------- - active_id : int - ID of active simulation which is checked for deactivation - similarity_dists : numpy array - 2D array having similarity distances between each micro simulation pair - micro_sim_states : numpy array - 1D array having state (active or inactive) of each micro simulation - """ - active_sim_ids = np.where(micro_sim_states == 1)[0] - - for active_id_2 in active_sim_ids: - if active_id != active_id_2: # don't compare active sim to itself - # If active sim is similar to another active sim, deactivate it - if similarity_dists[active_id, active_id_2] < self._coarse_tol: - return True - return False - def update_inactive_micro_sims( self, similarity_dists: np.ndarray, @@ -136,7 +54,6 @@ def update_inactive_micro_sims( """ Update set of inactive micro simulations. Each inactive micro simulation is compared to all active ones and if it is not similar to any of them, it is activated. 
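When an inactive simulation is activated further down in this function, its state is copied from the associated active simulation, preferring user-provided get_state and set_state methods and falling back to deepcopy. A hedged sketch of what such optional methods could look like in a user-written micro simulation class (the class name and state layout are illustrative assumptions, not prescribed by the patch):

    class MyMicroSimulation:
        def __init__(self):
            self._state = 0.0

        def solve(self, macro_data, dt):
            self._state += dt * macro_data["macro-scalar-data"]
            return {"micro-scalar-data": self._state}

        def get_state(self):
            # everything needed to reconstruct the simulation state,
            # returned as an iterable so it can be unpacked into set_state
            return [self._state]

        def set_state(self, state):
            self._state = state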
- Parameters ---------- similarity_dists : numpy array @@ -145,7 +62,6 @@ def update_inactive_micro_sims( 1D array having state (active or inactive) of each micro simulation micro_sims : list List of objects of class MicroProblem, which are the micro simulations - Returns ------- _micro_sim_states : numpy array @@ -156,10 +72,10 @@ def update_inactive_micro_sims( _micro_sim_states = np.copy(micro_sim_states) # Input micro_sim_states is not longer used after this point # Update the set of inactive micro sims - for i in range(self._number_of_sims): + for i in range(self._number_of_local_sims): if not _micro_sim_states[i]: # if id is inactive if self._check_for_activation(i, similarity_dists, _micro_sim_states): - associated_active_id = micro_sims[i].get_associated_active_id() + associated_active_local_id = micro_sims[i].get_associated_active_local_id() # Get local and global ID of inactive simulation, to set it to the copied simulation later local_id = micro_sims[i].get_local_id() @@ -167,42 +83,18 @@ def update_inactive_micro_sims( # Copy state from associated active simulation with get_state and # set_state if available else deepcopy - if hasattr(micro_sims[associated_active_id], 'get_state') and \ - hasattr(micro_sims[associated_active_id], 'set_state'): - micro_sims[i].set_state(*micro_sims[associated_active_id].get_state()) + if hasattr(micro_sims[associated_active_local_id], 'get_state') and \ + hasattr(micro_sims[associated_active_local_id], 'set_state'): + micro_sims[i].set_state(*micro_sims[associated_active_local_id].get_state()) else: micro_sims[i] = None - micro_sims[i] = deepcopy(micro_sims[associated_active_id]) + micro_sims[i] = deepcopy(micro_sims[associated_active_local_id]) micro_sims[i].set_local_id(local_id) micro_sims[i].set_global_id(global_id) _micro_sim_states[i] = 1 return _micro_sim_states - def _check_for_activation( - self, - inactive_id: int, - similarity_dists: np.ndarray, - micro_sim_states: np.ndarray) -> bool: - """ - Function to check if an inactive simulation needs to be activated - - Parameters - ---------- - inactive_id : int - ID of inactive simulation which is checked for activation - similarity_dists : numpy array - 2D array having similarity distances between each micro simulation pair - micro_sim_states : numpy array - 1D array having state (active or inactive) of each micro simulation - """ - active_sim_ids = np.where(micro_sim_states == 1)[0] - - dists = similarity_dists[inactive_id, active_sim_ids] - - # If inactive sim is not similar to any active sim, activate it - return min(dists) > self._ref_tol - def associate_inactive_to_active( self, similarity_dists: np.ndarray, @@ -232,4 +124,5 @@ def associate_inactive_to_active( associated_active_id = active_id dist_min = similarity_dists[inactive_id, active_id] - micro_sims[inactive_id].is_associated_to(associated_active_id) + micro_sims[inactive_id].is_associated_to_active_sim( + associated_active_id, self._global_ids_of_local_sims[associated_active_id]) diff --git a/micro_manager/config.py b/micro_manager/config.py index 14f2639f..06d4b6fc 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -36,6 +36,7 @@ def __init__(self, config_filename): self._output_micro_sim_time = False self._adaptivity = False + self._adaptivity_type = "local" self._data_for_adaptivity = dict() self._adaptivity_history_param = 0.5 self._adaptivity_coarsening_constant = 0.5 @@ -103,11 +104,19 @@ def read_json(self, config_filename): self._adaptivity = True elif data["simulation_params"]["adaptivity"] == 
"False": self._adaptivity = False + else: + raise Exception("Adaptivity can be either True or False.") except BaseException: - print("Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations " - "in all time steps.") + print("Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations in all time steps.") if self._adaptivity: + if data["simulation_params"]["adaptivity_type"] == "local": + self._adaptivity_type = "local" + elif data["simulation_params"]["adaptivity_type"] == "global": + self._adaptivity_type = "global" + else: + raise Exception("Adaptivity type can be either local or global.") + exchange_data = {**self._read_data_names, **self._write_data_names} for dname in data["simulation_params"]["adaptivity_data"]: self._data_for_adaptivity[dname] = exchange_data[dname] @@ -251,6 +260,14 @@ def turn_on_adaptivity(self): """ return self._adaptivity + def get_adaptivity_type(self): + """ + + Returns + ------- + """ + return self._adaptivity_type + def get_data_for_adaptivity(self): """ diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 981bb395..ddd9f19c 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -14,67 +14,12 @@ import time from .config import Config -from .adaptivity import AdaptiveController +from .micro_simulation import create_micro_problem_class +from .adaptivity.local_adaptivity import LocalAdaptivityCalculator sys.path.append(os.getcwd()) -def create_micro_problem_class(base_micro_simulation): - """ - Creates a class MicroProblem which inherits from the class of the micro simulation. - - Parameters - ---------- - base_micro_simulation : class - The base class from the micro simulation script. - - Returns - ------- - MicroProblem : class - Definition of class MicroProblem defined in this function. 
- """ - class MicroProblem(base_micro_simulation): - def __init__(self, local_id, global_id): - base_micro_simulation.__init__(self) - self._local_id = local_id - self._global_id = global_id - self._is_active = False # Simulation is created in an inactive state - self._associated_active_local_id = None - - def get_local_id(self): - return self._local_id - - def get_global_id(self): - return self._global_id - - def set_local_id(self, local_id): - self._local_id = local_id - - def set_global_id(self, global_id): - self._global_id = global_id - - def activate(self): - self._is_active = True - - def deactivate(self): - self._is_active = False - - def is_associated_to(self, similar_active_local_id): - assert not self._is_active, "Micro simulation {} is active and hence cannot be most similar to another active simulation".format( - self._global_id) - self._associated_active_local_id = similar_active_local_id - - def get_associated_active_id(self): - assert not self._is_active, "Micro simulation {} is active and hence cannot have a most similar active id".format( - self._global_id) - return self._associated_active_local_id - - def is_active(self): - return self._is_active - - return MicroProblem - - class MicroManager: def __init__(self, config_file: str) -> None: """ @@ -105,47 +50,52 @@ def __init__(self, config_file: str) -> None: self._micro_sims_have_output = False self._logger.info("Provided configuration file: {}".format(config_file)) - config = Config(config_file) + self._config = Config(config_file) # Define the preCICE interface - self._interface = precice.Interface("Micro-Manager", config.get_config_file_name(), self._rank, self._size) + self._interface = precice.Interface( + "Micro-Manager", + self._config.get_config_file_name(), + self._rank, + self._size) - micro_file_name = config.get_micro_file_name() + micro_file_name = self._config.get_micro_file_name() self._micro_problem = getattr(__import__(micro_file_name, fromlist=["MicroSimulation"]), "MicroSimulation") - self._macro_mesh_id = self._interface.get_mesh_id(config.get_macro_mesh_name()) + self._macro_mesh_id = self._interface.get_mesh_id(self._config.get_macro_mesh_name()) # Data names and ids of data written to preCICE - self._write_data_names = config.get_write_data_names() + self._write_data_names = self._config.get_write_data_names() self._write_data_ids = dict() for name in self._write_data_names.keys(): self._write_data_ids[name] = self._interface.get_data_id(name, self._macro_mesh_id) # Data names and ids of data read from preCICE - self._read_data_names = config.get_read_data_names() + self._read_data_names = self._config.get_read_data_names() self._read_data_ids = dict() for name in self._read_data_names.keys(): self._read_data_ids[name] = self._interface.get_data_id(name, self._macro_mesh_id) - self._data_used_for_adaptivity = dict() - - self._macro_bounds = config.get_macro_domain_bounds() - self._is_micro_solve_time_required = config.write_micro_solve_time() + self._macro_bounds = self._config.get_macro_domain_bounds() + self._is_micro_solve_time_required = self._config.write_micro_solve_time() self._local_number_of_micro_sims = None self._global_number_of_micro_sims = None self._is_rank_empty = False - self._micro_sims = None # Array carrying micro simulation objects self._dt = None self._mesh_vertex_ids = None # IDs of macro vertices as set by preCICE - self._micro_n_out = config.get_micro_output_n() + self._micro_n_out = self._config.get_micro_output_n() - self._is_adaptivity_on = config.turn_on_adaptivity() + 
self._is_adaptivity_on = self._config.turn_on_adaptivity() if self._is_adaptivity_on: - self._adaptivity_controller = AdaptiveController(config) - self._hist_param = config.get_adaptivity_hist_param() - self._adaptivity_data_names = config.get_data_for_adaptivity() + self._number_of_micro_sims_for_adaptivity = 0 + + self._data_for_similarity_calc = dict() + self._adaptivity_type = self._config.get_adaptivity_type() + + self._hist_param = self._config.get_adaptivity_hist_param() + self._adaptivity_data_names = self._config.get_data_for_adaptivity() # Names of macro data to be used for adaptivity computation self._adaptivity_macro_data_names = dict() @@ -157,7 +107,7 @@ def __init__(self, config_file: str) -> None: if name in self._write_data_names: self._adaptivity_micro_data_names[name] = is_data_vector - self._is_adaptivity_required_in_every_implicit_iteration = config.is_adaptivity_required_in_every_implicit_iteration() + self._is_adaptivity_required_in_every_implicit_iteration = self._config.is_adaptivity_required_in_every_implicit_iteration() self._micro_sims_active_steps = None def decompose_macro_domain(self, macro_bounds: list) -> list: @@ -228,16 +178,6 @@ def initialize(self) -> None: self._local_number_of_micro_sims, _ = mesh_vertex_coords.shape self._logger.info("Number of local micro simulations = {}".format(self._local_number_of_micro_sims)) - if self._is_adaptivity_on: - for name, is_data_vector in self._adaptivity_data_names.items(): - if is_data_vector: - self._data_used_for_adaptivity[name] = np.zeros( - (self._local_number_of_micro_sims, self._interface.get_dimensions())) - else: - self._data_used_for_adaptivity[name] = np.zeros((self._local_number_of_micro_sims)) - - self._adaptivity_controller.set_number_of_sims(self._local_number_of_micro_sims) - if self._local_number_of_micro_sims == 0: if self._is_parallel: self._logger.info( @@ -256,39 +196,70 @@ def initialize(self) -> None: # Get global number of micro simulations self._global_number_of_micro_sims = np.sum(nms_all_ranks) - # Create all micro simulations - sim_id = np.sum(nms_all_ranks[:self._rank]) + if self._is_adaptivity_on: + if self._adaptivity_type == "local": # Currently only local variant, global variant to follow + self._number_of_micro_sims_for_adaptivity = self._local_number_of_micro_sims - self._micro_sims = [] - self._micro_sim_global_ids = [] + for name, is_data_vector in self._adaptivity_data_names.items(): + if is_data_vector: + self._data_for_similarity_calc[name] = np.zeros( + (self._local_number_of_micro_sims, self._interface.get_dimensions())) + else: + self._data_for_similarity_calc[name] = np.zeros((self._local_number_of_micro_sims)) + + # Create lists of local and global IDs + sim_id = np.sum(nms_all_ranks[:self._rank]) + self._global_ids_of_local_sims = [] # DECLARATION for i in range(self._local_number_of_micro_sims): - self._micro_sims.append(create_micro_problem_class(self._micro_problem)(i, sim_id)) - self._micro_sim_global_ids.append(sim_id) + self._global_ids_of_local_sims.append(sim_id) sim_id += 1 + if self._is_adaptivity_on: + self._micro_sims = [None] * self._number_of_micro_sims_for_adaptivity # DECLARATION + if self._adaptivity_type == "local": + self._adaptivity_controller = LocalAdaptivityCalculator( + self._config, self._global_ids_of_local_sims, self._local_number_of_micro_sims) + # If adaptivity is calculated locally, IDs to iterate over are local + for i in range(self._local_number_of_micro_sims): + self._micro_sims[i] = create_micro_problem_class( + self._micro_problem)(i, 
self._global_ids_of_local_sims[i]) + + micro_sim_is_on_rank = [None] * self._local_number_of_micro_sims + for i in self._global_ids_of_local_sims: + micro_sim_is_on_rank[i] = self._rank + + self._micro_sim_is_on_rank = self._comm.allgather(micro_sim_is_on_rank) # DECLARATION + else: + self._micro_sims = [] # DECLARATION + for i in range(self._local_number_of_micro_sims): + self._micro_sims.append( + create_micro_problem_class( + self._micro_problem)( + i, self._global_ids_of_local_sims[i])) + micro_sims_output = list(range(self._local_number_of_micro_sims)) self._micro_sims_active_steps = np.zeros(self._local_number_of_micro_sims) # Initialize micro simulations if initialize() method exists if hasattr(self._micro_problem, 'initialize') and callable(getattr(self._micro_problem, 'initialize')): - for i in range(self._local_number_of_micro_sims): - micro_sims_output[i] = self._micro_sims[i].initialize() - if micro_sims_output[i] is not None: + for counter, i in enumerate(range(self._local_number_of_micro_sims)): + micro_sims_output[counter] = self._micro_sims[i].initialize() + if micro_sims_output[counter] is not None: if self._is_micro_solve_time_required: - micro_sims_output[i]["micro_sim_time"] = 0.0 + micro_sims_output[counter]["micro_sim_time"] = 0.0 if self._is_adaptivity_on: - micro_sims_output[i]["active_state"] = 0 - micro_sims_output[i]["active_steps"] = 0 + micro_sims_output[counter]["active_state"] = 0 + micro_sims_output[counter]["active_steps"] = 0 else: - micro_sims_output[i] = dict() + micro_sims_output[counter] = dict() for name, is_data_vector in self._write_data_names.items(): if is_data_vector: - micro_sims_output[i][name] = np.zeros(self._interface.get_dimensions()) + micro_sims_output[counter][name] = np.zeros(self._interface.get_dimensions()) else: - micro_sims_output[i][name] = 0.0 + micro_sims_output[counter][name] = 0.0 self._logger.info("Micro simulations with global IDs {} - {} initialized.".format( - self._micro_sim_global_ids[0], self._micro_sim_global_ids[-1])) + self._global_ids_of_local_sims[0], self._global_ids_of_local_sims[-1])) self._micro_sims_have_output = False if hasattr(self._micro_problem, 'output') and callable(getattr(self._micro_problem, 'output')): @@ -325,11 +296,11 @@ def read_data_from_precice(self) -> list: if self._is_adaptivity_on: if name in self._adaptivity_macro_data_names: - self._data_used_for_adaptivity[name] = read_data[name] + self._data_for_similarity_calc[name] = read_data[name] - local_read_data = [dict(zip(read_data, t)) for t in zip(*read_data.values())] + read_data = [dict(zip(read_data, t)) for t in zip(*read_data.values())] - return local_read_data + return read_data def write_data_to_precice(self, micro_sims_output: list) -> None: """ @@ -367,7 +338,7 @@ def write_data_to_precice(self, micro_sims_output: list) -> None: def compute_adaptivity(self, similarity_dists_nm1: np.ndarray, micro_sim_states_nm1: np.ndarray): """ - Compute adaptivity based on similarity distances and micro simulation states from t_{n-1} + Compute adaptivity locally based on similarity distances and micro simulation states from t_{n-1} Parameters ---------- @@ -375,7 +346,7 @@ def compute_adaptivity(self, similarity_dists_nm1: np.ndarray, micro_sim_states_ similarity_dists_nm1 : numpy array 2D array having similarity distances between each micro simulation pair at t_{n-1} micro_sim_states_nm1 : numpy array - 1D array having state (active or inactive) of each micro simulation at t_{n-1} + 1D array having state (active or inactive) of each micro 
simulation at t_{n-1} on this rank Results ------- @@ -388,22 +359,27 @@ def compute_adaptivity(self, similarity_dists_nm1: np.ndarray, micro_sim_states_ similarity_dists_n = exp(-self._hist_param * self._dt) * similarity_dists_nm1 for name, _ in self._adaptivity_data_names.items(): + # For global adaptivity, similarity distance matrix is calculated globally on every rank similarity_dists_n = self._adaptivity_controller.get_similarity_dists( - self._dt, similarity_dists_n, self._data_used_for_adaptivity[name]) + self._dt, similarity_dists_n, self._data_for_similarity_calc[name]) micro_sim_states_n = self._adaptivity_controller.update_active_micro_sims( similarity_dists_n, micro_sim_states_nm1, self._micro_sims) micro_sim_states_n = self._adaptivity_controller.update_inactive_micro_sims( - similarity_dists_n, micro_sim_states_n, self._micro_sims) + similarity_dists_n, micro_sim_states_nm1, self._micro_sims) self._adaptivity_controller.associate_inactive_to_active( similarity_dists_n, micro_sim_states_n, self._micro_sims) - assert np.any(micro_sim_states_n), "There are no active simulations, which is not possible." - - self._logger.info("Number of active micro simulations = {}".format(np.count_nonzero(micro_sim_states_n == 1))) - self._logger.info("Number of inactive micro simulations = {}".format(np.count_nonzero(micro_sim_states_n == 0))) + self._logger.info( + "Number of active micro simulations = {}".format( + np.count_nonzero( + micro_sim_states_n == 1))) + self._logger.info( + "Number of inactive micro simulations = {}".format( + np.count_nonzero( + micro_sim_states_n == 0))) return similarity_dists_n, micro_sim_states_n @@ -446,25 +422,22 @@ def solve_micro_simulations(self, micro_sims_input: list, micro_sim_states: np.n for name in self._adaptivity_micro_data_names: # Collect micro sim output for adaptivity - self._data_used_for_adaptivity[name][active_id] = micro_sims_output[active_id][name] + self._data_for_similarity_calc[name][active_id] = micro_sims_output[active_id][name] if self._is_micro_solve_time_required: micro_sims_output[active_id]["micro_sim_time"] = end_time - start_time # For each inactive simulation, copy data from most similar active simulation for inactive_id in inactive_sim_ids: - # self._logger.info("Micro sim [{}] is inactive. 
Copying data from most similar active micro sim [{}]".format( - # self._micro_sims[inactive_id].get_global_id(), - # self._micro_sim_global_ids[self._micro_sims[inactive_id].get_associated_active_id()])) - micro_sims_output[inactive_id] = dict() - for dname, values in micro_sims_output[self._micro_sims[inactive_id].get_associated_active_id()].items(): + for dname, values in micro_sims_output[self._micro_sims[inactive_id].get_associated_active_local_id()].items( + ): micro_sims_output[inactive_id][dname] = values if self._is_adaptivity_on: for name in self._adaptivity_micro_data_names: # Collect micro sim output for adaptivity - self._data_used_for_adaptivity[name][inactive_id] = micro_sims_output[inactive_id][name] + self._data_for_similarity_calc[name][inactive_id] = micro_sims_output[inactive_id][name] micro_sims_output[inactive_id]["active_state"] = 0 micro_sims_output[inactive_id]["active_steps"] = self._micro_sims_active_steps[inactive_id] @@ -480,20 +453,25 @@ def solve(self): """ t, n = 0, 0 t_checkpoint, n_checkpoint = 0, 0 - similarity_dists = np.zeros((self._local_number_of_micro_sims, self._local_number_of_micro_sims)) + micro_sim_states = np.ones((self._local_number_of_micro_sims)) # By default all sims are active if self._is_adaptivity_on: + similarity_dists = np.zeros( + (self._number_of_micro_sims_for_adaptivity, + self._number_of_micro_sims_for_adaptivity)) # Start adaptivity calculation with all sims inactive - micro_sim_states = np.zeros((self._local_number_of_micro_sims)) + micro_sim_states = np.zeros((self._number_of_micro_sims_for_adaptivity)) # If all sims are inactive, activate the first one (a random choice) self._micro_sims[0].activate() micro_sim_states[0] = 1 # All inactive sims are associated to the one active sim - for i in range(1, self._local_number_of_micro_sims): - self._micro_sims[i].is_associated_to(0) + for i in range(1, self._number_of_micro_sims_for_adaptivity): + self._micro_sims[i].is_associated_to_active_sim(0, self._global_ids_of_local_sims[0]) + self._micro_sims[0].is_associated_to_inactive_sims(range( + 1, self._number_of_micro_sims_for_adaptivity), self._global_ids_of_local_sims[1:self._local_number_of_micro_sims - 1]) similarity_dists_cp = None micro_sim_states_cp = None @@ -508,7 +486,9 @@ def solve(self): if self._is_adaptivity_on: if not self._is_adaptivity_required_in_every_implicit_iteration: - similarity_dists, micro_sim_states = self.compute_adaptivity(similarity_dists, micro_sim_states) + if self._adaptivity_type == "local": + similarity_dists, micro_sim_states = self.compute_adaptivity( + similarity_dists, micro_sim_states) # Only do checkpointing if adaptivity is computed once in every time window similarity_dists_cp = np.copy(similarity_dists) diff --git a/micro_manager/micro_simulation.py b/micro_manager/micro_simulation.py new file mode 100644 index 00000000..e6c9b16a --- /dev/null +++ b/micro_manager/micro_simulation.py @@ -0,0 +1,106 @@ +""" +Functionality to create MicroSimulation class objects which inherit from user provided base_micro_simulation class. +""" + + +def create_micro_problem_class(base_micro_simulation): + """ + Creates a class MicroSimulation which inherits from the class of the micro simulation. + + Parameters + ---------- + base_micro_simulation : class + The base class from the micro simulation script. + + Returns + ------- + MicroSimulation : class + Definition of class MicroSimulation defined in this function. 
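A brief usage sketch of this wrapper (illustrative only; the stand-in user class and the import path assume the package layout introduced in this patch): the returned class keeps the user's interface and adds the local/global ID and activity bookkeeping that the adaptivity code relies on.

    from micro_manager.micro_simulation import create_micro_problem_class


    class UserMicroSimulation:
        def __init__(self):
            self._value = 0.0

        def solve(self, macro_data, dt):
            self._value += dt
            return {"micro-scalar-data": self._value}


    WrappedSimulation = create_micro_problem_class(UserMicroSimulation)
    sim = WrappedSimulation(local_id=0, global_id=42)

    assert not sim.is_active()   # simulations are created inactive
    sim.activate()
    assert sim.get_global_id() == 42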
+ """ + class MicroSimulation(base_micro_simulation): + def __init__(self, local_id, global_id): + base_micro_simulation.__init__(self) + self._local_id = local_id + self._global_id = global_id + self._is_active = False # Simulation is created in an inactive state + + # Only defined when simulation is inactive + self._associated_active_local_id = None + self._associated_active_global_id = None + + # Only defined when simulation is active + self._associated_inactive_local_ids = None + self._associated_inactive_global_ids = None + + def get_local_id(self) -> int: + return self._local_id + + def get_global_id(self) -> int: + return self._global_id + + def set_local_id(self, local_id) -> None: + self._local_id = local_id + + def set_global_id(self, global_id) -> None: + self._global_id = global_id + + def activate(self) -> None: + self._is_active = True + + def deactivate(self) -> None: + self._is_active = False + + def is_active(self) -> bool: + return self._is_active + + def is_associated_to_active_sim(self, similar_active_local_id: int, similar_active_global_id: int) -> None: + assert not self._is_active, "Micro simulation {} is active and hence cannot be associated to another active simulation".format( + self._global_id) + self._associated_active_local_id = similar_active_local_id + self._associated_active_global_id = similar_active_global_id + + def get_associated_active_local_id(self) -> int: + assert not self._is_active, "Micro simulation {} is active and hence cannot have an associated active local ID".format( + self._global_id) + return self._associated_active_local_id + + def get_associated_active_global_id(self) -> int: + assert not self._is_active, "Micro simulation {} is active and hence cannot have an associated active global ID".format( + self._global_id) + return self._associated_active_global_id + + def is_associated_to_inactive_sim(self, similar_inactive_local_id: int, + similar_inactive_global_id: int) -> None: + assert self._is_active, "Micro simulation {} is inactive and hence cannot be associated to an inactive simulation".format( + self._global_id) + self._associated_inactive_local_ids.append(similar_inactive_local_id) + self._associated_inactive_global_ids.append(similar_inactive_global_id) + + def is_associated_to_inactive_sims(self, similar_inactive_local_ids: list, + similar_inactive_global_ids: list) -> None: + assert self._is_active, "Micro simulation {} is inactive and hence cannot be associated to inactive simulations".format( + self._global_id) + self._associated_inactive_local_ids = similar_inactive_local_ids + self._associated_inactive_global_ids = similar_inactive_global_ids + + def get_associated_inactive_local_id(self) -> int: + assert self._is_active, "Micro simulation {} is inactive and hence cannot have an associated inactive local ID".format( + self._global_id) + return self._associated_inactive_local_ids[0] + + def get_associated_inactive_global_id(self) -> int: + assert self._is_active, "Micro simulation {} is inactive and hence cannot have an associated inactive global ID".format( + self._global_id) + return self._associated_inactive_global_ids[0] + + def get_associated_inactive_local_ids(self) -> list: + assert self._is_active, "Micro simulation {} is inactive and hence cannot have associated inactive local IDs".format( + self._global_id) + return self._associated_inactive_local_ids + + def get_associated_inactive_global_ids(self) -> list: + assert self._is_active, "Micro simulation {} is active and hence cannot have associated inactive global 
IDs".format( + self._global_id) + return self._associated_inactive_global_ids + + return MicroSimulation diff --git a/tests/integration/test_adaptivity/micro-manager-config.json b/tests/integration/test_adaptivity/micro-manager-config.json index c2e86fb5..400754e9 100644 --- a/tests/integration/test_adaptivity/micro-manager-config.json +++ b/tests/integration/test_adaptivity/micro-manager-config.json @@ -9,6 +9,7 @@ "simulation_params": { "macro_domain_bounds": [0, 1, 0, 1, 0, 1], "adaptivity": "True", + "adaptivity_type": "local", "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], "adaptivity_history_param": 0.5, "adaptivity_coarsening_constant": 0.3, diff --git a/tests/unit/test_adaptivity.py b/tests/unit/test_adaptivity.py index 6d4c306d..c621c575 100644 --- a/tests/unit/test_adaptivity.py +++ b/tests/unit/test_adaptivity.py @@ -1,5 +1,5 @@ from unittest import TestCase -from micro_manager.adaptivity import AdaptiveController +from micro_manager.adaptivity.local_adaptivity import LocalAdaptivityCalculator from micro_manager.config import Config import numpy as np @@ -7,7 +7,8 @@ class TestAdaptivity(TestCase): def setUp(self): - self._adaptivity_controller = AdaptiveController(Config("./tests/unit/test_adaptivity_config.json")) + self._adaptivity_controller = LocalAdaptivityCalculator( + Config("./tests/unit/test_adaptivity_config.json"), range(5), 5) self._number_of_sims = 5 self._dt = 0.1 self._dim = 3 @@ -43,12 +44,7 @@ def setUp(self): self._coarse_const = 0.5 self._coarse_tol = 0.2 - def test_set_number_of_sims(self): - self._adaptivity_controller.set_number_of_sims(self._number_of_sims) - self.assertEqual(self._number_of_sims, self._adaptivity_controller._number_of_sims) - def test_get_similarity_dists(self): - self._adaptivity_controller._number_of_sims = self._number_of_sims expected_similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) for i in range(self._number_of_sims): for j in range(self._number_of_sims): @@ -72,7 +68,6 @@ def test_get_similarity_dists(self): self.assertTrue(np.array_equal(expected_similarity_dists, actual_similarity_dists)) def test_update_active_micro_sims(self): - self._adaptivity_controller._number_of_sims = self._number_of_sims # Third and fifth micro sim are active, rest are deactivate expected_micro_sim_states = np.array([0, 0, 1, 0, 1]) @@ -102,7 +97,6 @@ def deactivate(self): self.assertTrue(np.array_equal(expected_micro_sim_states, actual_micro_sim_states)) def test_update_inactive_micro_sims(self): - self._adaptivity_controller._number_of_sims = self._number_of_sims # Third and fifth micro sim are active, rest are deactivate expected_micro_sim_states = np.array([0, 1, 0, 1, 0]) @@ -122,6 +116,21 @@ class MicroSimulation(): def activate(self): pass + def get_local_id(self): + return 1 + + def get_global_id(self): + return 1 + + def set_local_id(self, local_id): + pass + + def set_global_id(self, global_id): + pass + + def get_associated_active_local_id(self): + return 1 + dummy_micro_sims = [] for i in range(self._number_of_sims): dummy_micro_sims.append(MicroSimulation()) @@ -132,7 +141,6 @@ def activate(self): self.assertTrue(np.array_equal(expected_micro_sim_states, actual_micro_sim_states)) def test_associate_active_to_inactive(self): - self._adaptivity_controller._number_of_sims = self._number_of_sims micro_sim_states = np.array([0, 0, 1, 0, 1]) similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) @@ -146,11 +154,12 @@ def test_associate_active_to_inactive(self): similarity_dists[i, j] = 
self._dt * similarity_dist class MicroSimulation(): - def is_most_similar_to(self, similar_active_id): - self._most_similar_active_id = similar_active_id + def is_associated_to_active_sim(self, local_id, global_id): + self._associated_active_local_id = local_id + self._associated_active_global_id = global_id - def get_most_similar_active_id(self): - return self._most_similar_active_id + def get_associated_active_local_id(self): + return self._associated_active_local_id dummy_micro_sims = [] for i in range(self._number_of_sims): @@ -158,6 +167,6 @@ def get_most_similar_active_id(self): self._adaptivity_controller.associate_inactive_to_active(similarity_dists, micro_sim_states, dummy_micro_sims) - self.assertEqual(dummy_micro_sims[0].get_most_similar_active_id(), 2) - self.assertEqual(dummy_micro_sims[1].get_most_similar_active_id(), 2) - self.assertEqual(dummy_micro_sims[3].get_most_similar_active_id(), 4) + self.assertEqual(dummy_micro_sims[0].get_associated_active_local_id(), 2) + self.assertEqual(dummy_micro_sims[1].get_associated_active_local_id(), 2) + self.assertEqual(dummy_micro_sims[3].get_associated_active_local_id(), 4) diff --git a/tests/unit/test_adaptivity_config.json b/tests/unit/test_adaptivity_config.json index 00755b63..e629573f 100644 --- a/tests/unit/test_adaptivity_config.json +++ b/tests/unit/test_adaptivity_config.json @@ -9,6 +9,7 @@ "simulation_params": { "macro_domain_bounds": [], "adaptivity": "True", + "adaptivity_type": "local", "adaptivity_data": [], "adaptivity_history_param": 0.5, "adaptivity_coarsening_constant": 0.3, From 3476db43b62743c0c43d31dc47eafc4068e87070 Mon Sep 17 00:00:00 2001 From: erikscheurer <84399192+erikscheurer@users.noreply.github.com> Date: Fri, 19 May 2023 16:19:25 +0200 Subject: [PATCH 34/87] Change branches to run tests on push (#43) * Change push-test branches to main and develop * Remove quotes from branches * Remove quotes for explicit branch names --------- Co-authored-by: Ishaan Desai --- .github/workflows/check-pep8.yml | 3 ++- .github/workflows/run-adaptivity-test.yml | 3 ++- .github/workflows/run-macro-micro-dummy.yml | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.github/workflows/check-pep8.yml b/.github/workflows/check-pep8.yml index 6f6e71bc..46ef5313 100644 --- a/.github/workflows/check-pep8.yml +++ b/.github/workflows/check-pep8.yml @@ -2,7 +2,8 @@ name: autopep8 on: push: branches: - - "*" + - main + - develop pull_request: branches: - "*" diff --git a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml index 66dbdca0..6845570c 100644 --- a/.github/workflows/run-adaptivity-test.yml +++ b/.github/workflows/run-adaptivity-test.yml @@ -2,7 +2,8 @@ name: Run tests for adaptivity on: push: branches: - - "*" + - main + - develop pull_request: branches: - "*" diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index 72b152d7..3ce1ab9f 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -2,7 +2,8 @@ name: Run macro-micro dummy case on: push: branches: - - "*" + - main + - develop pull_request: branches: - "*" From 23946fa08ac7f5b304705141f708d559903728dc Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 23 May 2023 12:50:53 +0200 Subject: [PATCH 35/87] New domain decomposition strategy based on user input of number of processors along each axis (#41) * New domain decomposition strategy based on user input of number of procs along each axis * Add CI to run solver 
dummy in parallel in two different scenarios * Remove uses tag from the step to actually run the solver dummy in parallel * Install sudo becuase setup-mpi.sh needs it via the mpi4py action * Run mpiexec commands in the solver dummy Action using su -c to avoid sudo user * Move chown command to just before when mpiexec is run * Remove chown command to find out what happens * Revert to original state to figure out the problem * Use precice user instead of root for the mpiexec commands in Actions * Use precice user instead of root for the mpiexec commands in Actions (second try) * Hardcode precice user while running mpiexec in Action * Create a user precice and then use it to run mpiexec commands * Just add user precice, without creating a home dir or specific shell * Commands are in run and not in uses part of GitHub Actions yml * Reduce number of MPI processes to 2 because GitHub CI complains * Split workflows to run solver dummy in serial and parallel variants * Formatting newly separated serial and parallel ways of runnning the dummy in the CI * Install sudo before installing mpi4py in the parallel dummy run Action * Move apt-get update command to before the installation of sudo * Remove flags in the non-default shell * Remove -e flag from non-default bash shell * Inidividually add su -c to every command * Dont add su -c for apt-get commands (obviously) * Use user precice while installing the Micro Manager * Try oversubscribing number of MPI procs in parallel dummy run * Move domain decomposition functionality to a separate class in a separate file * Add tests for domain decomposition functionality * Use new domain decomposition functionality in the Micro Manager * Domain decomposition only for parallel runs (obviously) * Restructuring tests and running domain decomposition integration test with unit cube instead of macro dummy * Solver dummy is now run only in serial * Add incorrectly deleted micro-mananger-config.json for solver dummy * Add adaptivity variant config for solver dummy CI and add timeout for domain decomposition tests * Change path to micro-manager config file for domain decomp integration tests * Add --oversubscribe flag to MPI run with 6 procs * Remove unnecessary math.sqrt import --- .github/workflows/run-adaptivity-test.yml | 6 +- .../run-domain-decomposition-tests.yml | 67 +++++++++++++++ .github/workflows/run-macro-micro-dummy.yml | 3 +- micro_manager/config.py | 17 ++++ micro_manager/domain_decomposition.py | 81 +++++++++++++++++++ micro_manager/micro_manager.py | 51 ++---------- .../test_adaptivity/run_micro_manager.py | 11 --- .../clean-test.sh | 0 .../micro-manager-config-adaptivity.json} | 2 +- .../micro-manager-config-parallel-1.json | 17 ++++ .../micro-manager-config-parallel-2.json | 17 ++++ .../micro_dummy.py} | 0 .../precice-config.xml | 0 .../test_unit_cube_dummy/run_micro_manager.py | 16 ++++ .../unit_cube_macro.py} | 0 tests/unit/test_domain_decomposition.py | 41 ++++++++++ 16 files changed, 270 insertions(+), 59 deletions(-) create mode 100644 .github/workflows/run-domain-decomposition-tests.yml create mode 100644 micro_manager/domain_decomposition.py delete mode 100644 tests/integration/test_adaptivity/run_micro_manager.py rename tests/integration/{test_adaptivity => test_unit_cube_dummy}/clean-test.sh (100%) rename tests/integration/{test_adaptivity/micro-manager-config.json => test_unit_cube_dummy/micro-manager-config-adaptivity.json} (95%) create mode 100644 tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json create mode 
100644 tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json rename tests/integration/{test_adaptivity/micro_solver.py => test_unit_cube_dummy/micro_dummy.py} (100%) rename tests/integration/{test_adaptivity => test_unit_cube_dummy}/precice-config.xml (100%) create mode 100644 tests/integration/test_unit_cube_dummy/run_micro_manager.py rename tests/integration/{test_adaptivity/macro_solver.py => test_unit_cube_dummy/unit_cube_macro.py} (100%) create mode 100644 tests/unit/test_domain_decomposition.py diff --git a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml index 6845570c..a16fdd46 100644 --- a/.github/workflows/run-adaptivity-test.yml +++ b/.github/workflows/run-adaptivity-test.yml @@ -25,8 +25,8 @@ jobs: run: pip3 install --user . - name: Run integration test run: | - cd tests/integration/test_adaptivity/ - python3 macro_solver.py & python3 run_micro_manager.py + cd tests/integration/test_unit_cube_dummy/ + python3 unit_cube_macro.py & python3 run_micro_manager.py --config micro-manager-config-adaptivity.json adaptivity_unit_tests: name: Run adaptivity unit tests @@ -44,4 +44,4 @@ jobs: - name: Install Micro Manager run: pip3 install --user . - name: Run unit tests - run: python3 -m unittest + run: python3 -m unittest tests/unit/test_adaptivity.py diff --git a/.github/workflows/run-domain-decomposition-tests.yml b/.github/workflows/run-domain-decomposition-tests.yml new file mode 100644 index 00000000..13465c83 --- /dev/null +++ b/.github/workflows/run-domain-decomposition-tests.yml @@ -0,0 +1,67 @@ +name: Run tests for domain decomposition +on: + push: + branches: + - main + - develop + pull_request: + branches: + - "*" +jobs: + domain_decomposition_integration_test: + name: Run domain decomposition integration tests + runs-on: ubuntu-latest + container: precice/precice + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Install sudo for MPI + run: | + apt-get -qq update + apt-get -qq install sudo + + - name: Use mpi4py + uses: mpi4py/setup-mpi@v1 + + - name: Add user precice + run: useradd -m -s /bin/bash precice + + - name: Install Dependencies + run: | + apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config + su -c "python -m pip install --upgrade pip" precice + su -c "pip install setuptools wheel twine" precice + + - name: Install micro-manager + run: su -c "pip3 install --user ." precice + + - name: Run integration test (variant 1) + timeout-minutes: 3 + run: | + chown -R precice tests/integration/test_unit_cube_dummy/ + cd tests/integration/test_unit_cube_dummy/ + su -c "mpiexec -n 2 python3 run_micro_manager.py --config micro-manager-config-parallel-1.json & python3 unit_cube_macro.py" precice + + - name: Run integration test (variant 2) + timeout-minutes: 3 + run: | + cd tests/integration/test_unit_cube_dummy/ + su -c "mpiexec -n 6 --oversubscribe python3 run_micro_manager.py --config micro-manager-config-parallel-2.json & python3 unit_cube_macro.py" precice + + domain_decomposition_unit_tests: + name: Run domain decomposition unit tests + runs-on: ubuntu-latest + container: precice/precice + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + - name: Install Dependencies + run: | + apt-get -qq update + apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config + python -m pip install --upgrade pip + pip install setuptools wheel + - name: Install Micro Manager + run: pip3 install --user . 
+ - name: Run unit tests + run: python3 -m unittest tests/unit/test_domain_decomposition.py diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index 3ce1ab9f..74c056a3 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -1,4 +1,4 @@ -name: Run macro-micro dummy case +name: Run macro-micro dummy on: push: branches: @@ -21,6 +21,7 @@ jobs: run: | apt-get -qq update apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config + apt-get -qq install sudo python -m pip install --upgrade pip pip install setuptools wheel twine diff --git a/micro_manager/config.py b/micro_manager/config.py index 06d4b6fc..9895a21b 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -30,6 +30,7 @@ def __init__(self, config_filename): self._write_data_names = dict() self._macro_domain_bounds = None + self._ranks_per_axis = None self._micro_output_n = 1 self._diagnostics_data_names = dict() @@ -93,6 +94,11 @@ def read_json(self, config_filename): self._macro_domain_bounds = data["simulation_params"]["macro_domain_bounds"] + try: + self._ranks_per_axis = data["simulation_params"]["axiswise_ranks"] + except BaseException: + print("Domain decomposition is not specified, so the Micro Manager will expect to be run in serial.") + try: self._micro_output_n = data["simulation_params"]["micro_output_n"] except BaseException: @@ -218,6 +224,17 @@ def get_macro_domain_bounds(self): """ return self._macro_domain_bounds + def get_ranks_per_axis(self): + """ + Get the ranks per axis for a parallel simulation + + Returns + ------- + ranks_per_axis : list + List containing ranks in the x, y and z axis respectively. + """ + return self._ranks_per_axis + def get_micro_file_name(self): """ Get the path to the Python script of the micro-simulation. diff --git a/micro_manager/domain_decomposition.py b/micro_manager/domain_decomposition.py new file mode 100644 index 00000000..41c8c7b5 --- /dev/null +++ b/micro_manager/domain_decomposition.py @@ -0,0 +1,81 @@ +""" +Functionality to partition the macro domain according to the user provided partitions in each axis +""" + +import numpy as np + + +class DomainDecomposer: + def __init__(self, logger, dims, rank, size) -> None: + self._logger = logger + self._rank = rank + self._size = size + self._dims = dims + + def decompose_macro_domain(self, macro_bounds: list, ranks_per_axis: list) -> list: + """ + Decompose the macro domain equally among all ranks, if the Micro Manager is run in parallel. + + Parameters + ---------- + macro_bounds : list + List containing upper and lower bounds of the macro domain. + Format in 2D is [x_min, x_max, y_min, y_max] + Format in 2D is [x_min, x_max, y_min, y_max, z_min, z_max] + ranks_per_axis : list + List containing axis wise ranks for a parallel run + Format in 2D is [ranks_x, ranks_y] + Format in 2D is [ranks_x, ranks_y, ranks_z] + + Returns + ------- + mesh_bounds : list + List containing the upper and lower bounds of the domain pertaining to this rank. + Format is same as input parameter macro_bounds. + """ + assert np.prod( + ranks_per_axis) == self._size, "Total number of processors provided in the Micro Manager configuration and in the MPI execution command do not match." 
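        # Note for orientation (illustrative comment, not part of the original diff;
        # example values are taken from the unit tests added in this patch):
        #   1. dx holds the per-axis width of one subdomain, e.g. x-bounds [-1, 1]
        #      split over ranks_per_axis[0] = 1 gives dx[0] = 2.0.
        #   2. rank_in_axis places this rank along each axis, assuming a row-major
        #      style ordering of ranks (x fastest, then y, then z).
        #   3. mesh_bounds collects the lower and upper bound per axis, and the last
        #      rank on an axis is snapped exactly to the macro domain bound.
        # For example, rank 5 of 10 with ranks_per_axis = [1, 2, 5] and macro bounds
        # [-1, 1, -2, 2, -2, 8] ends up with mesh_bounds [-1, 1, 0, 2, 2, 4].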
+ + dx = [] + for d in range(self._dims): + dx.append(abs(macro_bounds[d * 2 + 1] - macro_bounds[d * 2]) / ranks_per_axis[d]) + + rank_in_axis: list[int] = [None] * self._dims + if ranks_per_axis[0] == 1: # if serial in x axis + rank_in_axis[0] = 0 + else: + rank_in_axis[0] = self._rank % ranks_per_axis[0] # x axis + + if self._dims == 2: + if ranks_per_axis[1] == 1: # if serial in y axis + rank_in_axis[1] = 0 + else: + rank_in_axis[1] = int(self._rank / ranks_per_axis[0]) # y axis + elif self._dims == 3: + if ranks_per_axis[2] == 1: # if serial in z axis + rank_in_axis[2] = 0 + else: + rank_in_axis[2] = int(self._rank / (ranks_per_axis[0] * ranks_per_axis[1])) # z axis + + if ranks_per_axis[1] == 1: # if serial in y axis + rank_in_axis[1] = 0 + else: + rank_in_axis[1] = (self._rank - ranks_per_axis[0] * ranks_per_axis[1] + * rank_in_axis[2]) % ranks_per_axis[2] # y axis + + mesh_bounds = [] + for d in range(self._dims): + if rank_in_axis[d] > 0: + mesh_bounds.append(macro_bounds[d * 2] + rank_in_axis[d] * dx[d]) + mesh_bounds.append(macro_bounds[d * 2] + (rank_in_axis[d] + 1) * dx[d]) + elif rank_in_axis[d] == 0: + mesh_bounds.append(macro_bounds[d * 2]) + mesh_bounds.append(macro_bounds[d * 2] + dx[d]) + + # Adjust the maximum bound to be exactly the domain size + if rank_in_axis[d] + 1 == ranks_per_axis[d]: + mesh_bounds[d * 2 + 1] = macro_bounds[d * 2 + 1] + + self._logger.info("Bounding box limits are {}".format(mesh_bounds)) + + return mesh_bounds diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index ddd9f19c..1330f9c2 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -8,7 +8,7 @@ import sys import precice from mpi4py import MPI -from math import sqrt, exp +from math import exp import numpy as np import logging import time @@ -16,6 +16,7 @@ from .config import Config from .micro_simulation import create_micro_problem_class from .adaptivity.local_adaptivity import LocalAdaptivityCalculator +from .domain_decomposition import DomainDecomposer sys.path.append(os.getcwd()) @@ -77,6 +78,10 @@ def __init__(self, config_file: str) -> None: self._read_data_ids[name] = self._interface.get_data_id(name, self._macro_mesh_id) self._macro_bounds = self._config.get_macro_domain_bounds() + + if self._is_parallel: # Simulation is run in parallel + self._ranks_per_axis = self._config.get_ranks_per_axis() + self._is_micro_solve_time_required = self._config.write_micro_solve_time() self._local_number_of_micro_sims = None @@ -110,47 +115,6 @@ def __init__(self, config_file: str) -> None: self._is_adaptivity_required_in_every_implicit_iteration = self._config.is_adaptivity_required_in_every_implicit_iteration() self._micro_sims_active_steps = None - def decompose_macro_domain(self, macro_bounds: list) -> list: - """ - Decompose the macro domain equally among all ranks, if the Micro Manager is run in parallel. - - Parameters - ---------- - macro_bounds : list - List containing upper and lower bounds of the macro domain. - Format in 2D is [x_min, x_max, y_min, y_max] - Format in 2D is [x_min, x_max, y_min, y_max, z_min, z_max] - - Returns - ------- - mesh_bounds : list - List containing the upper and lower bounds of the domain pertaining to this rank. - Format is same as input parameter macro_bounds. 
- """ - size_x = int(sqrt(self._size)) - while self._size % size_x != 0: - size_x -= 1 - - size_y = int(self._size / size_x) - - dx = abs(macro_bounds[1] - macro_bounds[0]) / size_x - dy = abs(macro_bounds[3] - macro_bounds[2]) / size_y - - local_xmin = macro_bounds[0] + dx * (self._rank % size_x) - local_ymin = macro_bounds[2] + dy * int(self._rank / size_x) - - mesh_bounds = [] - if self._interface.get_dimensions() == 2: - mesh_bounds = [local_xmin, local_xmin + dx, local_ymin, local_ymin + dy] - elif self._interface.get_dimensions() == 3: - # TODO: Domain needs to be decomposed optimally in the Z direction - # too - mesh_bounds = [local_xmin, local_xmin + dx, local_ymin, local_ymin + dy, macro_bounds[4], macro_bounds[5]] - - self._logger.info("Bounding box limits are {}".format(mesh_bounds)) - - return mesh_bounds - def initialize(self) -> None: """ This function does the following things: @@ -165,7 +129,8 @@ def initialize(self) -> None: assert len(self._macro_bounds) / \ 2 == self._interface.get_dimensions(), "Provided macro mesh bounds are of incorrect dimension" if self._is_parallel: - coupling_mesh_bounds = self.decompose_macro_domain(self._macro_bounds) + domain_decomposer = DomainDecomposer(self._logger, self._interface.get_dimensions(), self._rank, self._size) + coupling_mesh_bounds = domain_decomposer.decompose_macro_domain(self._macro_bounds, self._ranks_per_axis) else: coupling_mesh_bounds = self._macro_bounds diff --git a/tests/integration/test_adaptivity/run_micro_manager.py b/tests/integration/test_adaptivity/run_micro_manager.py deleted file mode 100644 index bfb69ab5..00000000 --- a/tests/integration/test_adaptivity/run_micro_manager.py +++ /dev/null @@ -1,11 +0,0 @@ -""" -Script to run the Micro Manager -""" - -from micro_manager import MicroManager - -manager = MicroManager("./micro-manager-config.json") - -manager.initialize() - -manager.solve() diff --git a/tests/integration/test_adaptivity/clean-test.sh b/tests/integration/test_unit_cube_dummy/clean-test.sh similarity index 100% rename from tests/integration/test_adaptivity/clean-test.sh rename to tests/integration/test_unit_cube_dummy/clean-test.sh diff --git a/tests/integration/test_adaptivity/micro-manager-config.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-adaptivity.json similarity index 95% rename from tests/integration/test_adaptivity/micro-manager-config.json rename to tests/integration/test_unit_cube_dummy/micro-manager-config-adaptivity.json index 400754e9..9cfa6e6b 100644 --- a/tests/integration/test_adaptivity/micro-manager-config.json +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-adaptivity.json @@ -1,5 +1,5 @@ { - "micro_file_name": "micro_solver", + "micro_file_name": "micro_dummy", "coupling_params": { "config_file_name": "precice-config.xml", "macro_mesh_name": "macro-cube-mesh", diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json new file mode 100644 index 00000000..42ad035a --- /dev/null +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json @@ -0,0 +1,17 @@ +{ + "micro_file_name": "micro_dummy", + "coupling_params": { + "config_file_name": "./precice-config.xml", + "macro_mesh_name": "macro-cube-mesh", + "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, + "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} + }, + "simulation_params": { + 
"macro_domain_bounds": [0, 1, 0, 1, 0, 1], + "axiswise_ranks": [1, 1, 2], + "adaptivity": "False" + }, + "diagnostics": { + "output_micro_sim_solve_time": "True" + } +} diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json new file mode 100644 index 00000000..f58f7cd6 --- /dev/null +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json @@ -0,0 +1,17 @@ +{ + "micro_file_name": "micro_dummy", + "coupling_params": { + "config_file_name": "./precice-config.xml", + "macro_mesh_name": "macro-cube-mesh", + "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, + "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} + }, + "simulation_params": { + "macro_domain_bounds": [0, 1, 0, 1, 0, 1], + "axiswise_ranks": [1, 2, 3], + "adaptivity": "False" + }, + "diagnostics": { + "output_micro_sim_solve_time": "True" + } +} diff --git a/tests/integration/test_adaptivity/micro_solver.py b/tests/integration/test_unit_cube_dummy/micro_dummy.py similarity index 100% rename from tests/integration/test_adaptivity/micro_solver.py rename to tests/integration/test_unit_cube_dummy/micro_dummy.py diff --git a/tests/integration/test_adaptivity/precice-config.xml b/tests/integration/test_unit_cube_dummy/precice-config.xml similarity index 100% rename from tests/integration/test_adaptivity/precice-config.xml rename to tests/integration/test_unit_cube_dummy/precice-config.xml diff --git a/tests/integration/test_unit_cube_dummy/run_micro_manager.py b/tests/integration/test_unit_cube_dummy/run_micro_manager.py new file mode 100644 index 00000000..a3d0dd6c --- /dev/null +++ b/tests/integration/test_unit_cube_dummy/run_micro_manager.py @@ -0,0 +1,16 @@ +""" +Script to run the Micro Manager +""" + +from micro_manager import MicroManager +from argparse import ArgumentParser + +parser = ArgumentParser() +parser.add_argument("--config", help="Path to the micro manager configuration file") +args = parser.parse_args() + +manager = MicroManager(args.config) + +manager.initialize() + +manager.solve() diff --git a/tests/integration/test_adaptivity/macro_solver.py b/tests/integration/test_unit_cube_dummy/unit_cube_macro.py similarity index 100% rename from tests/integration/test_adaptivity/macro_solver.py rename to tests/integration/test_unit_cube_dummy/unit_cube_macro.py diff --git a/tests/unit/test_domain_decomposition.py b/tests/unit/test_domain_decomposition.py new file mode 100644 index 00000000..04473394 --- /dev/null +++ b/tests/unit/test_domain_decomposition.py @@ -0,0 +1,41 @@ +from unittest import TestCase +from unittest.mock import MagicMock +from micro_manager.domain_decomposition import DomainDecomposer +import numpy as np + + +class TestDomainDecomposition(TestCase): + + def setUp(self) -> None: + self._logger = MagicMock() + self._macro_bounds_3d = [-1, 1, -2, 2, -2, 8] + + def test_rank5_outof_10_3d(self): + rank = 5 + size = 10 + ranks_per_axis = [1, 2, 5] + domain_decomposer = DomainDecomposer(self._logger, 3, rank, size) + domain_decomposer._dims = 3 + mesh_bounds = domain_decomposer.decompose_macro_domain(self._macro_bounds_3d, ranks_per_axis) + + self.assertTrue(np.allclose(mesh_bounds, [-1, 1, 0, 2, 2, 4])) + + def test_rank10_out_of_32_3d(self): + rank = 10 + size = 32 + ranks_per_axis = [4, 1, 8] + domain_decomposer = DomainDecomposer(self._logger, 3, rank, size) + domain_decomposer._dims = 3 + mesh_bounds = 
domain_decomposer.decompose_macro_domain(self._macro_bounds_3d, ranks_per_axis) + + self.assertTrue(np.allclose(mesh_bounds, [0, 0.5, -2, 2, 0.5, 1.75])) + + def test_rank7_out_of_16_3d(self): + rank = 7 + size = 16 + ranks_per_axis = [8, 2, 1] + domain_decomposer = DomainDecomposer(self._logger, 3, rank, size) + domain_decomposer._dims = 3 + mesh_bounds = domain_decomposer.decompose_macro_domain(self._macro_bounds_3d, ranks_per_axis) + + self.assertTrue(np.allclose(mesh_bounds, [0.75, 1, -2, 0, -2, 8])) From f01e035141ceecb463316a6971231ed3241bca58 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 23 May 2023 13:59:00 +0200 Subject: [PATCH 36/87] Repair global collective operations for number of sims on ranks and global ids --- micro_manager/micro_manager.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 1330f9c2..2d239589 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -152,11 +152,10 @@ def initialize(self) -> None: else: raise Exception("Micro Manager has no micro simulations.") - nms_all_ranks = np.zeros(self._size, dtype=np.int64) # Gather number of micro simulations that each rank has, because this rank needs to know how many micro # simulations have been created by previous ranks, so that it can set # the correct global IDs - self._comm.Allgather(np.array(self._local_number_of_micro_sims), nms_all_ranks) + nms_all_ranks = self._comm.allreduce(self._local_number_of_micro_sims) # Get global number of micro simulations self._global_number_of_micro_sims = np.sum(nms_all_ranks) @@ -190,7 +189,7 @@ def initialize(self) -> None: self._micro_problem)(i, self._global_ids_of_local_sims[i]) micro_sim_is_on_rank = [None] * self._local_number_of_micro_sims - for i in self._global_ids_of_local_sims: + for i in self._local_number_of_micro_sims: micro_sim_is_on_rank[i] = self._rank self._micro_sim_is_on_rank = self._comm.allgather(micro_sim_is_on_rank) # DECLARATION From f899e6b83b95f8032e4feca49e74c4febc3c164f Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 23 May 2023 14:17:39 +0200 Subject: [PATCH 37/87] Go back to older Allgather commands to collect info on number of sims and global ids on all ranks --- micro_manager/micro_manager.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 2d239589..2e20bafe 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -152,10 +152,11 @@ def initialize(self) -> None: else: raise Exception("Micro Manager has no micro simulations.") + nms_all_ranks = np.zeros(self._size, dtype=np.int64) # Gather number of micro simulations that each rank has, because this rank needs to know how many micro # simulations have been created by previous ranks, so that it can set # the correct global IDs - nms_all_ranks = self._comm.allreduce(self._local_number_of_micro_sims) + self._comm.Allgather(np.array(self._local_number_of_micro_sims), nms_all_ranks) # Get global number of micro simulations self._global_number_of_micro_sims = np.sum(nms_all_ranks) @@ -188,11 +189,12 @@ def initialize(self) -> None: self._micro_sims[i] = create_micro_problem_class( self._micro_problem)(i, self._global_ids_of_local_sims[i]) - micro_sim_is_on_rank = [None] * self._local_number_of_micro_sims + micro_sim_is_on_rank = np.zeros(self._local_number_of_micro_sims) for i in self._local_number_of_micro_sims: micro_sim_is_on_rank[i] = self._rank 
- self._micro_sim_is_on_rank = self._comm.allgather(micro_sim_is_on_rank) # DECLARATION + self._micro_sim_is_on_rank = np.zeros(self._global_number_of_micro_sims) # DECLARATION + self._comm.Allgather(micro_sim_is_on_rank, self._micro_sim_is_on_rank) else: self._micro_sims = [] # DECLARATION for i in range(self._local_number_of_micro_sims): From 5228040c8bf648d9ffebb4a34835d1c415a3323f Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 23 May 2023 14:28:10 +0200 Subject: [PATCH 38/87] Use range for iteratable --- micro_manager/micro_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 2e20bafe..07dd2bad 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -190,7 +190,7 @@ def initialize(self) -> None: self._micro_problem)(i, self._global_ids_of_local_sims[i]) micro_sim_is_on_rank = np.zeros(self._local_number_of_micro_sims) - for i in self._local_number_of_micro_sims: + for i in range(self._local_number_of_micro_sims): micro_sim_is_on_rank[i] = self._rank self._micro_sim_is_on_rank = np.zeros(self._global_number_of_micro_sims) # DECLARATION From 9d4a57af7d15cb4072ed2630b231d55dc0639d13 Mon Sep 17 00:00:00 2001 From: erikscheurer <84399192+erikscheurer@users.noreply.github.com> Date: Wed, 24 May 2023 09:17:00 +0200 Subject: [PATCH 39/87] Add unit tests for micro_manager file (#37) * Add unit tests for micro_manager file * Add `create_micro_problem_class` test * Add action for unit tests * Change python to python3 * Install setup tools and wheels and change branches * Change python to python3 * Actually change python to python3 * Really change python to python3 * Ignore pip * Install python is python3 * Undo Previous changes * Remove sudo * Update apt * Add python bindings to `PYTHONPATH` * Install required packages * Mock pyprecice instead of installing test interface * Add more tests * Change pip3 to python3 -m pipp * Install pip * Update apt * Uninstall pyprecice after installation * Execute tests in test directory * Update tests/unit/precice.py Co-authored-by: Ishaan Desai * Move to `setUp` function and test constuctor * Revert branches on which push-tests are run * Undo unrelated changes * Change unit test branches to main and develop * Remove quotes around branches * Remove `manager.initialize` * Use adaptive config, remove print, manually set `_local_number_of micro_sims` * Split config test into two * Change history param * Join config tests * Fix failing tests * Renaming configs and test files * Use new files names in configs and tests --------- Co-authored-by: Ishaan Desai --- .../run-micro-manager-unit-tests.yml | 30 ++++++ ...-manager-unit-test-adaptivity-config.json} | 0 .../unit/micro-manager-unit-test-config.json | 24 +++++ tests/unit/precice.py | 78 +++++++++++++++ tests/unit/test_adaptivity.py | 2 +- tests/unit/test_micro_manager.py | 98 +++++++++++++++++++ 6 files changed, 231 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/run-micro-manager-unit-tests.yml rename tests/unit/{test_adaptivity_config.json => micro-manager-unit-test-adaptivity-config.json} (100%) create mode 100644 tests/unit/micro-manager-unit-test-config.json create mode 100644 tests/unit/precice.py create mode 100644 tests/unit/test_micro_manager.py diff --git a/.github/workflows/run-micro-manager-unit-tests.yml b/.github/workflows/run-micro-manager-unit-tests.yml new file mode 100644 index 00000000..984aa2f5 --- /dev/null +++ 
b/.github/workflows/run-micro-manager-unit-tests.yml @@ -0,0 +1,30 @@ +name: Run unit tests +on: + push: + branches: + - main + - develop + pull_request: + branches: + - "*" +jobs: + unit-tests: + runs-on: ubuntu-latest + container: precice/precice + steps: + - uses: actions/checkout@v3 + with: + path: micro-manager + + - name: Install micro-manager + run: | + cd micro-manager + apt-get -qq update + apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config + python3 -m pip install --user . + python3 -m pip uninstall -y pyprecice + + - name: Run unit tests + working-directory: micro-manager/tests/unit + run: | + python3 -m unittest test_micro_manager.py diff --git a/tests/unit/test_adaptivity_config.json b/tests/unit/micro-manager-unit-test-adaptivity-config.json similarity index 100% rename from tests/unit/test_adaptivity_config.json rename to tests/unit/micro-manager-unit-test-adaptivity-config.json diff --git a/tests/unit/micro-manager-unit-test-config.json b/tests/unit/micro-manager-unit-test-config.json new file mode 100644 index 00000000..bddd71cd --- /dev/null +++ b/tests/unit/micro-manager-unit-test-config.json @@ -0,0 +1,24 @@ +{ + "micro_file_name": "test_micro_manager", + "coupling_params": { + "config_file_name": "./precice-config.xml", + "macro_mesh_name": "macro-mesh", + "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, + "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} + }, + "simulation_params": { + "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0], + "micro_output_n": 10, + "adaptivity": "True", + "adaptivity_type": "local", + "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], + "adaptivity_history_param": 0.5, + "adaptivity_coarsening_constant": 0.3, + "adaptivity_refining_constant": 0.4, + "adaptivity_every_implicit_iteration": "False" + + }, + "diagnostics": { + "output_micro_sim_solve_time": "True" + } +} diff --git a/tests/unit/precice.py b/tests/unit/precice.py new file mode 100644 index 00000000..35d4c5fe --- /dev/null +++ b/tests/unit/precice.py @@ -0,0 +1,78 @@ +# This file mocks pyprecice, the Python bindings for preCICE and is used _only_ for unit testing the Micro Manager. 
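+# It provides stand-ins for the preCICE Interface calls used by the Micro Manager
+# (mesh and data IDs, initialize, block read/write of scalar and vector data,
+# advance, finalize), returning hard-coded values or echoing back previously written
+# data, so that the unit tests can run without a real preCICE installation.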
+import numpy as np + + +def action_write_initial_data(): + return "ActionWriteInitialData" + + +def action_write_iteration_checkpoint(): + return "ActionWriteIterationCheckpoint" + + +class Interface: + def __init__(self, solver_name, config_file_name, solver_process_index, solver_process_size): + self.read_write_vector_buffer = [] + self.read_write_scalar_buffer = [] + + def get_mesh_id(self, mesh_name): + return 0 + + def get_data_id(self, data_name, mesh_id): + return int(data_name == "micro-scalar-data") + + def get_dimensions(self): + return 3 + + def set_mesh_access_region(self, mesh_id, bounds): + pass + + def initialize(self): + return 0.1 # dt + + def get_mesh_vertices_and_ids(self, mesh_id): + return np.array([0, 1, 2, 3]), np.array([[0, 0], [1, 0], [1, 1], [0, 1]]) + + def is_action_required(self, action): + return True + + def mark_action_fulfilled(self, action): + pass + + def initialize_data(self): + pass + + def write_block_scalar_data(self, data_id, vertex_ids, data): + if data_id == 1: # micro-scalar-data not micro_sim_time + self.read_write_scalar_buffer = data + + def write_block_vector_data(self, data_id, vertex_ids, data): + self.read_write_vector_buffer = data + + def write_scalar_data(self, data_id, vertex_id, data): + pass + + def write_vector_data(self, data_id, vertex_id, data): + pass + + def read_block_scalar_data(self, data_id, vertex_ids): + return self.read_write_scalar_buffer + + def read_block_vector_data(self, data_id, vertex_ids): + return self.read_write_vector_buffer + + def read_scalar_data(self, data_id, vertex_id): + return 0 + + def read_vector_data(self, data_id, vertex_id): + return [0, 0] + + def finalize(self): + pass + + def is_coupling_ongoing(self): + yield True + yield False + + def advance(self, dt): + pass diff --git a/tests/unit/test_adaptivity.py b/tests/unit/test_adaptivity.py index c621c575..4383893d 100644 --- a/tests/unit/test_adaptivity.py +++ b/tests/unit/test_adaptivity.py @@ -8,7 +8,7 @@ class TestAdaptivity(TestCase): def setUp(self): self._adaptivity_controller = LocalAdaptivityCalculator( - Config("./tests/unit/test_adaptivity_config.json"), range(5), 5) + Config("./tests/unit/micro-manager-unit-test-adaptivity-config.json"), range(5), 5) self._number_of_sims = 5 self._dt = 0.1 self._dim = 3 diff --git a/tests/unit/test_micro_manager.py b/tests/unit/test_micro_manager.py new file mode 100644 index 00000000..04f18171 --- /dev/null +++ b/tests/unit/test_micro_manager.py @@ -0,0 +1,98 @@ +import numpy as np +from unittest import TestCase +import micro_manager + + +class MicroSimulation: + def __init__(self): + self.very_important_value = 0 + + def initialize(self): + pass + + def solve(self, macro_data, dt): + assert macro_data["macro-scalar-data"] == 1 + assert macro_data["macro-vector-data"].tolist() == [0, 1, 2] + return {"micro-scalar-data": macro_data["macro-scalar-data"] + 1, + "micro-vector-data": macro_data["macro-vector-data"] + 1} + + +class TestFunctioncalls(TestCase): + def setUp(self): + self.fake_read_data_names = {"macro-scalar-data": False, "macro-vector-data": True} + self.fake_read_data = [{"macro-scalar-data": 1, "macro-vector-data": np.array([0, 1, 2])}] * 4 + self.fake_write_data_names = { + "micro-scalar-data": False, + "micro-vector-data": True, + 'micro_sim_time': False, + 'active_state': False, + 'active_steps': False} + self.fake_write_data = [{"micro-scalar-data": 1, + "micro-vector-data": np.array([0, 1, 2]), + "micro_sim_time": 0, + "active_state": 0, + "active_steps": 0}] * 4 + self.macro_bounds = 
[0.0, 25.0, 0.0, 25.0, 0.0, 25.0] + + def test_micromanager_constructor(self): + manager = micro_manager.MicroManager('micro-manager-unit-test-config.json') + self.assertListEqual(manager._macro_bounds, self.macro_bounds) + self.assertDictEqual(manager._read_data_names, self.fake_read_data_names) + self.assertDictEqual(self.fake_write_data_names, manager._write_data_names) + self.assertEqual(manager._micro_n_out, 10) + + def test_initialize(self): + manager = micro_manager.MicroManager('micro-manager-unit-test-config.json') + manager.initialize() + self.assertEqual(manager._dt, 0.1) # from Interface.initialize + self.assertEqual(manager._global_number_of_micro_sims, 4) + self.assertListEqual(manager._macro_bounds, self.macro_bounds) + self.assertListEqual(manager._mesh_vertex_ids.tolist(), [0, 1, 2, 3]) + self.assertEqual(len(manager._micro_sims), 4) + self.assertEqual(manager._micro_sims[0].very_important_value, 0) # test inheritance + self.assertDictEqual(manager._read_data_names, self.fake_read_data_names) + self.assertDictEqual(self.fake_write_data_names, manager._write_data_names) + + def test_read_write_data_from_precice(self): + manager = micro_manager.MicroManager('micro-manager-unit-test-config.json') + manager.write_data_to_precice(self.fake_write_data) + read_data = manager.read_data_from_precice() + for data, fake_data in zip(read_data, self.fake_write_data): + self.assertEqual(data["macro-scalar-data"], 1) + self.assertListEqual(data["macro-vector-data"].tolist(), + fake_data["micro-vector-data"].tolist()) + + def test_solve_mico_sims(self): + manager = micro_manager.MicroManager('micro-manager-unit-test-config.json') + manager._local_number_of_micro_sims = 4 + manager._micro_sims = [MicroSimulation() for _ in range(4)] + manager._micro_sims_active_steps = np.zeros(4, dtype=np.int32) + micro_sims_output = manager.solve_micro_simulations(self.fake_read_data, np.array([True, True, True, True])) + for data, fake_data in zip(micro_sims_output, self.fake_write_data): + self.assertEqual(data["micro-scalar-data"], 2) + self.assertListEqual(data["micro-vector-data"].tolist(), + (fake_data["micro-vector-data"] + 1).tolist()) + + def test_config(self): + config = micro_manager.Config('micro-manager-unit-test-config.json') + + self.assertEqual(config._config_file_name.split("/")[-1], "precice-config.xml") + self.assertEqual(config._micro_file_name, "test_micro_manager") + self.assertEqual(config._macro_mesh_name, "macro-mesh") + self.assertEqual(config._micro_output_n, 10) + self.assertDictEqual(config._read_data_names, self.fake_read_data_names) + self.assertDictEqual(self.fake_write_data_names, config._write_data_names) + + # test adaptivity + self.assertEqual(config._adaptivity, True) + self.assertDictEqual(config._data_for_adaptivity, self.fake_read_data_names) + self.assertEqual(config._adaptivity_type, "local") + self.assertEqual(config._adaptivity_history_param, 0.5) + self.assertEqual(config._adaptivity_coarsening_constant, 0.3) + self.assertEqual(config._adaptivity_refining_constant, 0.4) + self.assertEqual(config._adaptivity_every_implicit_iteration, False) + + +if __name__ == '__main__': + import unittest + unittest.main() From c6e7e641fdb30ab66d0b0a90d6fa66a25f8c7e6e Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 24 May 2023 09:35:07 +0200 Subject: [PATCH 40/87] Update CHANEGLOG --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bf27cdf0..2c8a700a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,11 @@ ## 
latest +- New domain decomposition strategy based on user input of number of processors along each axis https://github.com/precice/micro-manager/pull/41 +- Add pickling support for C++ solver dummy https://github.com/precice/micro-manager/pull/30 +- Add C++ solver dummy to show how a C++ micro simulation can be controlled by the Micro Manager https://github.com/precice/micro-manager/pull/22 +- Add local adaptivity https://github.com/precice/micro-manager/pull/21 + ## v0.2.1 - Fixing the broken action workflow `run-macro-micro-dummy` From 65aeabfa8dd29f6aca7e94850937aaab75485946 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 24 May 2023 09:53:01 +0200 Subject: [PATCH 41/87] Make GitHub Actions YAML files consistent and also format them --- .github/workflows/run-adaptivity-test.yml | 16 ++++++++++----- .../run-domain-decomposition-tests.yml | 11 ++++++---- .github/workflows/run-macro-micro-dummy.yml | 20 +++++++++---------- .../run-micro-manager-unit-tests.yml | 3 +-- .../micro-manager-config-parallel-1.json | 2 +- .../micro-manager-config-parallel-2.json | 2 +- tests/unit/test_adaptivity.py | 2 +- 7 files changed, 31 insertions(+), 25 deletions(-) diff --git a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml index a16fdd46..d179fb60 100644 --- a/.github/workflows/run-adaptivity-test.yml +++ b/.github/workflows/run-adaptivity-test.yml @@ -14,19 +14,21 @@ jobs: container: precice/precice steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v3 + - name: Install dependencies run: | apt-get -qq update apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config python -m pip install --upgrade pip pip install setuptools wheel + - name: Install Micro Manager run: pip3 install --user . + - name: Run integration test - run: | - cd tests/integration/test_unit_cube_dummy/ - python3 unit_cube_macro.py & python3 run_micro_manager.py --config micro-manager-config-adaptivity.json + working-directory: micro-manager/tests/integration/test_unit_cube_dummy + run: python3 unit_cube_macro.py & python3 run_micro_manager.py --config micro-manager-config-adaptivity.json adaptivity_unit_tests: name: Run adaptivity unit tests @@ -35,13 +37,17 @@ jobs: steps: - name: Checkout Repository uses: actions/checkout@v3 + - name: Install Dependencies run: | apt-get -qq update apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config python -m pip install --upgrade pip pip install setuptools wheel + - name: Install Micro Manager run: pip3 install --user . 
+ - name: Run unit tests - run: python3 -m unittest tests/unit/test_adaptivity.py + working-directory: micro-manager/tests/unit + run: python3 -m unittest test_adaptivity.py diff --git a/.github/workflows/run-domain-decomposition-tests.yml b/.github/workflows/run-domain-decomposition-tests.yml index 13465c83..9a7c058d 100644 --- a/.github/workflows/run-domain-decomposition-tests.yml +++ b/.github/workflows/run-domain-decomposition-tests.yml @@ -37,15 +37,17 @@ jobs: - name: Run integration test (variant 1) timeout-minutes: 3 + working-directory: micro-manager/tests/integration run: | - chown -R precice tests/integration/test_unit_cube_dummy/ - cd tests/integration/test_unit_cube_dummy/ + chown -R precice test_unit_cube_dummy/ + cd test_unit_cube_dummy/ su -c "mpiexec -n 2 python3 run_micro_manager.py --config micro-manager-config-parallel-1.json & python3 unit_cube_macro.py" precice - name: Run integration test (variant 2) timeout-minutes: 3 + working-directory: micro-manager/tests/integration run: | - cd tests/integration/test_unit_cube_dummy/ + cd test_unit_cube_dummy/ su -c "mpiexec -n 6 --oversubscribe python3 run_micro_manager.py --config micro-manager-config-parallel-2.json & python3 unit_cube_macro.py" precice domain_decomposition_unit_tests: @@ -64,4 +66,5 @@ jobs: - name: Install Micro Manager run: pip3 install --user . - name: Run unit tests - run: python3 -m unittest tests/unit/test_domain_decomposition.py + working-directory: micro-manager/tests/unit + run: python3 -m unittest test_domain_decomposition.py diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index 74c056a3..4c627ecc 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -15,7 +15,7 @@ jobs: steps: - name: Checkout Repository - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Install Dependencies run: | @@ -30,20 +30,19 @@ jobs: - name: Run python macro-micro dummy timeout-minutes: 3 - run: | - cd examples/ - python3 python-dummy/run_micro_manager.py --config ../micro-manager-config.json & python3 macro_dummy.py + working-directory: micro-manager/examples + run: python3 python-dummy/run_micro_manager.py --config ../micro-manager-config.json & python3 macro_dummy.py - name: Run adaptive python macro-micro dummy timeout-minutes: 3 - run: | - cd examples/ - python3 python-dummy/run_micro_manager.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py + working-directory: micro-manager/examples + run: python3 python-dummy/run_micro_manager.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py - name: Run c++ macro-micro dummy timeout-minutes: 3 + working-directory: micro-manager/examples run: | - cd examples/cpp-dummy/ + cd cpp-dummy/ pip install pybind11 c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) cd ../ @@ -51,6 +50,5 @@ jobs: - name: Run adaptive c++ macro-micro dummy timeout-minutes: 3 - run: | - cd examples/ - python3 cpp-dummy/run_micro_manager.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py + working-directory: micro-manager/examples + run: python3 cpp-dummy/run_micro_manager.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py diff --git a/.github/workflows/run-micro-manager-unit-tests.yml b/.github/workflows/run-micro-manager-unit-tests.yml index 984aa2f5..a8a2130e 100644 --- 
a/.github/workflows/run-micro-manager-unit-tests.yml +++ b/.github/workflows/run-micro-manager-unit-tests.yml @@ -26,5 +26,4 @@ jobs: - name: Run unit tests working-directory: micro-manager/tests/unit - run: | - python3 -m unittest test_micro_manager.py + run: python3 -m unittest test_micro_manager.py diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json index 42ad035a..4e62524b 100644 --- a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json @@ -1,7 +1,7 @@ { "micro_file_name": "micro_dummy", "coupling_params": { - "config_file_name": "./precice-config.xml", + "config_file_name": "precice-config.xml", "macro_mesh_name": "macro-cube-mesh", "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json index f58f7cd6..994bedd2 100644 --- a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json @@ -1,7 +1,7 @@ { "micro_file_name": "micro_dummy", "coupling_params": { - "config_file_name": "./precice-config.xml", + "config_file_name": "precice-config.xml", "macro_mesh_name": "macro-cube-mesh", "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} diff --git a/tests/unit/test_adaptivity.py b/tests/unit/test_adaptivity.py index 4383893d..08b8e76d 100644 --- a/tests/unit/test_adaptivity.py +++ b/tests/unit/test_adaptivity.py @@ -8,7 +8,7 @@ class TestAdaptivity(TestCase): def setUp(self): self._adaptivity_controller = LocalAdaptivityCalculator( - Config("./tests/unit/micro-manager-unit-test-adaptivity-config.json"), range(5), 5) + Config("micro-manager-unit-test-adaptivity-config.json"), range(5), 5) self._number_of_sims = 5 self._dt = 0.1 self._dim = 3 From 921b4471690e7a255bd4c2d744e6b97ba7295afc Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 24 May 2023 09:59:44 +0200 Subject: [PATCH 42/87] Add path in checkout Action for all actions --- .github/workflows/run-adaptivity-test.yml | 2 ++ .github/workflows/run-domain-decomposition-tests.yml | 5 ++++- .github/workflows/run-macro-micro-dummy.yml | 2 ++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml index d179fb60..9f3d850d 100644 --- a/.github/workflows/run-adaptivity-test.yml +++ b/.github/workflows/run-adaptivity-test.yml @@ -15,6 +15,8 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v3 + with: + path: micro-manager - name: Install dependencies run: | diff --git a/.github/workflows/run-domain-decomposition-tests.yml b/.github/workflows/run-domain-decomposition-tests.yml index 9a7c058d..395f3f3a 100644 --- a/.github/workflows/run-domain-decomposition-tests.yml +++ b/.github/workflows/run-domain-decomposition-tests.yml @@ -14,7 +14,10 @@ jobs: container: precice/precice steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v3 + with: + path: micro-manager + - name: Install sudo for MPI run: | apt-get 
-qq update diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index 4c627ecc..cc627a7f 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -16,6 +16,8 @@ jobs: - name: Checkout Repository uses: actions/checkout@v3 + with: + path: micro-manager - name: Install Dependencies run: | From 0548c1d44e32ae799fea1d378fe61d13a3339f7d Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 24 May 2023 10:23:07 +0200 Subject: [PATCH 43/87] Further formatting of Actions YAML files and consistent use of working-directory feature --- .github/workflows/run-adaptivity-test.yml | 6 ++++++ .github/workflows/run-domain-decomposition-tests.yml | 10 ++++++++++ .github/workflows/run-macro-micro-dummy.yml | 2 ++ .github/workflows/run-micro-manager-unit-tests.yml | 4 ++-- 4 files changed, 20 insertions(+), 2 deletions(-) diff --git a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml index 9f3d850d..ab17719f 100644 --- a/.github/workflows/run-adaptivity-test.yml +++ b/.github/workflows/run-adaptivity-test.yml @@ -19,6 +19,7 @@ jobs: path: micro-manager - name: Install dependencies + working-directory: micro-manager run: | apt-get -qq update apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config @@ -26,6 +27,7 @@ jobs: pip install setuptools wheel - name: Install Micro Manager + working-directory: micro-manager run: pip3 install --user . - name: Run integration test @@ -39,8 +41,11 @@ jobs: steps: - name: Checkout Repository uses: actions/checkout@v3 + with: + path: micro-manager - name: Install Dependencies + working-directory: micro-manager run: | apt-get -qq update apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config @@ -48,6 +53,7 @@ jobs: pip install setuptools wheel - name: Install Micro Manager + working-directory: micro-manager run: pip3 install --user . - name: Run unit tests diff --git a/.github/workflows/run-domain-decomposition-tests.yml b/.github/workflows/run-domain-decomposition-tests.yml index 395f3f3a..73330d50 100644 --- a/.github/workflows/run-domain-decomposition-tests.yml +++ b/.github/workflows/run-domain-decomposition-tests.yml @@ -19,6 +19,7 @@ jobs: path: micro-manager - name: Install sudo for MPI + working-directory: micro-manager run: | apt-get -qq update apt-get -qq install sudo @@ -30,12 +31,14 @@ jobs: run: useradd -m -s /bin/bash precice - name: Install Dependencies + working-directory: micro-manager run: | apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config su -c "python -m pip install --upgrade pip" precice su -c "pip install setuptools wheel twine" precice - name: Install micro-manager + working-directory: micro-manager run: su -c "pip3 install --user ." precice - name: Run integration test (variant 1) @@ -60,14 +63,21 @@ jobs: steps: - name: Checkout Repository uses: actions/checkout@v3 + with: + path: micro-manager + - name: Install Dependencies + working-directory: micro-manager run: | apt-get -qq update apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config python -m pip install --upgrade pip pip install setuptools wheel + - name: Install Micro Manager + working-directory: micro-manager run: pip3 install --user . 
+ - name: Run unit tests working-directory: micro-manager/tests/unit run: python3 -m unittest test_domain_decomposition.py diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index cc627a7f..0560b4ce 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -20,6 +20,7 @@ jobs: path: micro-manager - name: Install Dependencies + working-directory: micro-manager run: | apt-get -qq update apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config @@ -28,6 +29,7 @@ jobs: pip install setuptools wheel twine - name: Install micro-manager + working-directory: micro-manager run: pip3 install --user . - name: Run python macro-micro dummy diff --git a/.github/workflows/run-micro-manager-unit-tests.yml b/.github/workflows/run-micro-manager-unit-tests.yml index a8a2130e..daa0d23b 100644 --- a/.github/workflows/run-micro-manager-unit-tests.yml +++ b/.github/workflows/run-micro-manager-unit-tests.yml @@ -16,9 +16,9 @@ jobs: with: path: micro-manager - - name: Install micro-manager + - name: Install Micro Manager and uninstall pyprecice + working-directory: micro-manager run: | - cd micro-manager apt-get -qq update apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config python3 -m pip install --user . From e9540cc8c4b7db940050be8754e95fd2b3ee1808 Mon Sep 17 00:00:00 2001 From: erikscheurer <84399192+erikscheurer@users.noreply.github.com> Date: Thu, 25 May 2023 12:38:45 +0200 Subject: [PATCH 44/87] Update documentation for the preCICE website (#39) * Update Readme.md * Clean up README * Add \ to $ * Correct inline formulas * Restructure Micro Manager doc and implement review * Update links and give next steps * Reworking base README, docs/README and configuration * Reworking material --------- Co-authored-by: Ishaan Desai --- README.md | 218 ++----------------------------- docs/README.md | 28 ++++ docs/installation.md | 48 +++++++ docs/usage-configuration.md | 135 +++++++++++++++++++ docs/usage-convert-to-library.md | 100 ++++++++++++++ docs/usage-running.md | 30 +++++ docs/usage.md | 17 +++ 7 files changed, 368 insertions(+), 208 deletions(-) create mode 100644 docs/README.md create mode 100644 docs/installation.md create mode 100644 docs/usage-configuration.md create mode 100644 docs/usage-convert-to-library.md create mode 100644 docs/usage-running.md create mode 100644 docs/usage.md diff --git a/README.md b/README.md index 5b7848db..2ca326ac 100644 --- a/README.md +++ b/README.md @@ -1,219 +1,21 @@ # Micro Manager -A Manager tool to facilitate solving two-scale (macro-micro) coupled problems using the coupling library [preCICE](https://github.com/precice/precice). +A tool to facilitate solving two-scale (macro-micro) coupled problems using the coupling library [preCICE](https://www.precice.org/). -## Installing the Micro Manager +## Start Here -### Option 1: Using pip +The main documentation is on the [preCICE website](https://precice.org/) TODO: Update to link of documentation. -It is recommended to install [micro-manager-precice from PyPI](https://pypi.org/project/micro-manager-precice/) by running +Please report any bugs and issues [here](https://github.com/precice/micro-manager/issues) and give us feedback through [one of our community channels](https://precice.org/community-channels.html). -```bash -pip install --user micro-manager-precice -``` +## References -If the dependencies are not installed, then `pip` will attempt to install them for you. 
If you encounter problems in the direct installation, see the [dependencies section](https://github.com/precice/micro-manager#required-dependencies) below for links to installation procedures of all dependencies. +The concept and initial design of the Micro Manager has been discussed in -### Option 2: Clone this repository and install manually +Desai, Ishaan, & Bringedal, Carina & Uekermann, Benjamin. A flexible software approach to simulate two-scale coupled problems. ECCOMAS Congress 2022. [10.23967/eccomas.2022.037](https://doi.org/10.23967/eccomas.2022.037) -#### Required dependencies +The Micro Manager can adaptively control micro simulations. The adaptivity strategy is taken from two publications -Ensure that the following dependencies are installed: +1. Redeker, Magnus & Eck, Christof. (2013). A fast and accurate adaptive solution strategy for two-scale models with continuous inter-scale dependencies. Journal of Computational Physics. 240. 268-283. [10.1016/j.jcp.2012.12.025](https://doi.org/10.1016/j.jcp.2012.12.025). -* Python 3 or higher -* [preCICE](https://github.com/precice/precice/wiki) -* [pyprecice: Python language bindings for preCICE](https://github.com/precice/python-bindings) -* [numpy](https://numpy.org/install/) -* [mpi4py](https://mpi4py.readthedocs.io/en/stable/install.html) - -#### Build and install the Manager using pip - -After cloning this repository, go to the project directory `micro-manager` and run `pip3 install --user .`. - -#### Build and install the Manager using Python - -After cloning this repository, go to the project directory `micro-manager` and run `python setup.py install --user`. - -## Using the Micro Manager - -The Micro Manager facilitates two-scale coupling between one macro-scale simulation and many micro-scale simulations. It creates instances of several micro simulations and couples them to one macro simulation, using preCICE. An existing micro simulation code written in Python needs to be converted into a library with a specific class name and specific function names. The next section describes the required library structure of the micro simulation code. On the other hand, the micro-problem is coupled to preCICE directly. The section [couple your code](https://precice.org/couple-your-code-overview.html) of the preCICE documentation gives more details on coupling existing codes. - -### Steps to convert micro simulation code to a callable library - -* Create a class called `MicroSimulation`. It is good practice to define class member variables in the class constructor `__init__`. This constructor does not get any input. -* **Optional**: Define a function `initialize` which computes the initial state of the micro simulation and returns initial values, which need to be transferred to the macro simulation. The return value needs to be a Python dictionary with the names of the quantities as keys and the values of the quantities as the dictionary values. -* Create a function `solve`, which consists of all solving steps of one time step of a micro simulation or, if the micro problem is a steady-state simulation, all solving steps until the steady state is reached. `solve` should take a Python dictionary as an input, which has the name of the input data as keys and the corresponding data values as values. The `solve` function should return the quantities that need to be communicated to the macro-side. The return entity needs to again be a Python dictionary with the names of the quantities as keys and the values of the quantities as values. 
-* If implicit coupling is required between the macro and all micro problems, then you can additionally define two functions `save_checkpoint` and `revert_to_checkpoint`. - * `save_checkpoint` should save the current state of the micro problem. - * `revert_to_checkpoint` should revert to the saved state (required if the coupling loop does not convergence after a time step). -* **Optional**: Define a function `output` which writes the micro simulation output. The micro Manager will call this function with the frequency set by the configuration option `simulation_params: micro_output_n`. - -An example of a MicroSimulation class as used in `/examples/macro-micro-dummy`: - -```python -class MicroSimulation: - - def __init__(self): - """ - Constructor of MicroSimulation class. - """ - self._dims = 3 - self._micro_scalar_data = None - self._micro_vector_data = None - self._checkpoint = None - - def initialize(self): - self._micro_scalar_data = 0 - self._micro_vector_data = [] - self._checkpoint = 0 - - def solve(self, macro_data, dt): - assert dt != 0 - self._micro_vector_data = [] - self._micro_scalar_data = macro_data["macro-scalar-data"] - for d in range(self._dims): - self._micro_vector_data.append(macro_data["macro-vector-data"][d]) - - return {"micro-scalar-data": self._micro_scalar_data.copy(), - "micro-vector-data": self._micro_vector_data.copy()} - - def save_checkpoint(self): - print("Saving state of micro problem") - self._checkpoint = self._micro_scalar_data - - def reload_checkpoint(self): - print("Reverting to old state of micro problem") - self._micro_scalar_data = self._checkpoint - - def output(self): - print("Writing VTK output of micro problem") - self._write_vtk() -``` - -### Configuring the Micro Manager - -The Micro Manager is configured at runtime using a JSON file `micro-manager-config.json`. An example configuration file is: - -```json -{ - "micro_file_name": "micro_heat", - "coupling_params": { - "participant_name": "Micro-Manager", - "config_file_name": "precice-config.xml", - "macro_mesh_name": "macro-mesh", - "write_data_names": {"k_00": "scalar", "k_11": "scalar", "porosity": "scalar"}, - "read_data_names": {"concentration": "scalar"} - }, - "simulation_params": { - "macro_domain_bounds": [0.0, 1.0, 0.0, 0.5], - "micro_output_n": 5 - }, - "diagnostics": { - "data_from_micro_sims": {"grain_size": "scalar"}, - "output_micro_sim_solve_time": "True" - } -} -``` - -The following quantities need to be configured: - -* `micro_file`: Path to the micro-simulation script. **Do not add the file extension** `.py`. -* `coupling_params`: - * `precice_config`: Path to the preCICE XML configuration file. - * `macro_mesh`: Name of the macro mesh as stated in the preCICE configuration. - * `read_data_names`: A Python dictionary with the names of the data to be read from preCICE as keys and `"scalar"` or `"vector"` as values. - * `write_data_names`: A Python dictionary with the names of the data to be written to preCICE as keys and `"scalar"` or `"vector"` as values. -* `simulation_params`: - * `macro_domain_bounds`: Minimum and maximum limits of the macro-domain, having the format `[xmin, xmax, ymin, ymax, zmin, zmax]`. - -In addition to the aforementioned configuration options there are optional choices: - -* `simulation_params`: - * `micro_output_n`: Frequency of calling the output functionality of the micro simulation in terms of number of time steps. If this quantity is configured the Micro Manager will attempt to call the `output()` function of the micro simulation. 
- -The Micro Manager is capable of generating diagnostics type output of the micro simulations, which is critical in the development phase of two-scale simulations. The following configuration options are available: - -* `diagnostics`: - * `data_from_micro_sims`: A Python dictionary with the names of the data from the micro simulation to be written to VTK files as keys and `"scalar"` or `"vector"` as values. - * `output_micro_sim_solve_time`: When `True`, the Manager writes the wall clock time of the `solve()` function of each micro simulation to the VTK output. - -The Micro Manager can adaptively initialize micro simulations. The adaptivity strategy is taken from two publications: - -1. Redeker, Magnus & Eck, Christof. (2013). A fast and accurate adaptive solution strategy for two-scale models with continuous inter-scale dependencies. Journal of Computational Physics. 240. 268-283. 10.1016/j.jcp.2012.12.025. - -2. Bastidas, Manuela & Bringedal, Carina & Pop, Iuliu. (2021). A two-scale iterative scheme for a phase-field model for precipitation and dissolution in porous media. Applied Mathematics and Computation. 396. 125933. 10.1016/j.amc.2020.125933. - -To turn on adaptivity, the following options need to be set in `simulation_params`: - -* `adaptivity`: Set as `True`. -* `adaptivity_data`: List of names of data which are to be used to calculate if two micro-simulations are similar or not. For example `["macro-scalar-data", "macro-vector-data"]` -* `adaptivity_history_param`: History parameter $\Lambda$, set as $\Lambda >= 0$. -* `adaptivity_coarsening_constant`: Coarsening constant $C_c$, set as $C_c < 1$. -* `adaptivity_refining_constant`: Refining constant $C_r$, set as $C_r >= 0$. -* `adaptivity_every_implicit_iteration`: Set as `True` if adaptivity calculation is to be done in every implicit iteration. Setting `False` would lead to adaptivity being calculated once at the start of the time window and then reused in every implicit time iteration. - -All variables names are chosen to be same as the second publication mentioned above. - -#### Changes to preCICE configuration file - -The Micro Manager relies on the [export functionality](https://precice.org/configuration-export.html#enabling-exporters) of preCICE to write diagnostics data output. - -* If the option `diagnotics: data_from_micro_sims` is configured, the corresponding export tag also needs to be set in the preCICE XML configuration script. -* If adaptivity is turned on, the Micro Manager will attempt to write a scalar data set `active_state` to preCICE. Add this data set to the preCICE configuration file. - -### Running the Micro Manager - -The Micro Manager is run directly from the terminal by providing the configuration file as an input argument in the following way: - -```bash -micro_manager micro-manager-config.json -``` - -Alternatively the Manager can also be run by creating a Python script which imports the Micro Manager package and calls its run function. For example a run script `run-micro-manager.py` would look like: - -```python -from micro_manager import MicroManager - -manager = MicroManager("micro-manager-config.json") - -manager.run() -``` - -The script is then run: - -```bash -python run-micro-manager.py -``` - -The Micro Manager can also be run in parallel, using the same script as stated above: - -```bash -mpirun -n python3 run-micro-manager.py -``` - -### Advanced configuration options - -In addition to the above mentioned configuration options, the Manager offers more options for diagnostics output. 
- -If the user wants to output the clock time required to solve each micro simulation, They can add the following keyword to the configuration: - -```json -"diagnostics": { - "output_micro_sim_solve_time": "True" -} -``` - -Additionally if the micro simulation code has a function called `output`, the Manager will try to call it in order to generate output of all micro simulations. In this situation, the Manager can be configured to output at a particular interval. This configuration is done as follows: - -```json -"simulation_params": { - "micro_output_n": 10 -} -``` - -Here, the Manager will write output of micro simulations every 10 time steps. If the entity `micro_output_n` is not defined, then the Manager will output the micro simulation output in every time step. - -### Creating a preCICE configuration file for a macro-micro problem - -In addition to configuring the Micro Manager, preCICE itself also needs to be configured via a [XML configuration file](https://precice.org/configuration-overview.html). -The user is expected to configure preCICE with the correct names of the data being exchanged between the macro and micro side. An example of such a macro-micro configuration for preCICE can be found in [this two-scale heat conduction example](https://github.com/IshaanDesai/coupled-heat-conduction). +2. Bastidas, Manuela & Bringedal, Carina & Pop, Iuliu. (2021). A two-scale iterative scheme for a phase-field model for precipitation and dissolution in porous media. Applied Mathematics and Computation. 396. 125933. [10.1016/j.amc.2020.125933](https://doi.org/10.1016/j.amc.2020.125933). diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..4f4a3d1e --- /dev/null +++ b/docs/README.md @@ -0,0 +1,28 @@ +--- +title: The Micro Manager +permalink: tooling-micro-manager-overview.html +keywords: tooling, macro-micro, two-scale +summary: The Micro Manager is a tool to facilitate solving two-scale (macro-micro) coupled problems using the coupling library preCICE. +--- + +## What is this? + +The Micro Manager is a tool to facilitate solving two-scale (macro-micro) coupled problems using the coupling library [preCICE](https://www.precice.org/). + +## What can it do? + +The Micro Manager is able to couple many micro simulations with one macro simulation. This includes: + +- Passing data between micro and macro simulations +- Running micro simulations in parallel using MPI +- Adaptively activating and deactivating micro simulations based on whether their similar exist + +## Documentation + +For a more detailed look, the documentation is split into the following sections: + +- [Installation](tooling-micro-manager-installation.html) +- [Usage](tooling-micro-manager-usage.html) + - [Code Changes](tooling-micro-manager-code-changes.html) + - [Configuration](tooling-micro-manager-configuration.html) + - [Running](tooling-micro-manager-running.html) diff --git a/docs/installation.md b/docs/installation.md new file mode 100644 index 00000000..55e42ad3 --- /dev/null +++ b/docs/installation.md @@ -0,0 +1,48 @@ +--- +title: Installing the Micro Manager +permalink: tooling-micro-manager-installation.html +keywords: tooling, macro-micro, two-scale +summary: Install the Micro Manager by running `pip install --user micro-manager-precice`. +--- + +## Installation + +The Micro Manager is a Python package that can be installed using `pip`. Make sure [preCICE](installation-overview.html) is installed before installing the Micro Manager. 
The Micro Manager is compatible with preCICE version [2.5.0](https://github.com/precice/precice/releases/tag/v2.5.0). + +### Option 1: Using pip + +It is recommended to install [micro-manager-precice from PyPI](https://pypi.org/project/micro-manager-precice/) by running + +```bash +pip install --user micro-manager-precice +``` + +Unless already installed, the dependencies will be installed by `pip` during the installation procedure. preCICE itself needs to be installed separately. If you encounter problems in the direct installation, see the [dependencies section](#required-dependencies) below. + +### Option 2: Clone this repository and install manually + +#### Required dependencies + +Ensure that the following dependencies are installed: + +* Python 3 or higher +* [preCICE](installation-overview.html) [v2.5.0](https://github.com/precice/precice/releases/tag/v2.5.0) +* [pyprecice: Python language bindings for preCICE](installation-bindings-python.html) +* [numpy](https://numpy.org/install/) +* [mpi4py](https://mpi4py.readthedocs.io/en/stable/install.html) + +#### Build and install the Manager using pip + +After cloning this repository, go to the directory `micro-manager/` and run + +```bash +pip install --user . +``` + +#### Build and install the Manager using Python + +After cloning this repository, go to the project directory `micro-manager/` and run + +```bash +python setup.py install --user +``` diff --git a/docs/usage-configuration.md b/docs/usage-configuration.md new file mode 100644 index 00000000..456756bb --- /dev/null +++ b/docs/usage-configuration.md @@ -0,0 +1,135 @@ +--- +title: Configuration of the Micro Manager +permalink: tooling-micro-manager-usage-configuration.html +keywords: tooling, macro-micro, two-scale +summary: The Micro Manager uses a JSON file to configure the coupling. The coupled data has to be specified in the preCICE configuration file. +--- + +The Micro Manager is configured at runtime using a JSON file. An example configuration file is + +```json +{ + "micro_file_name": "micro_solver", + "coupling_params": { + "config_file_name": "precice-config.xml", + "macro_mesh_name": "macro-mesh", + "read_data_names": {"temperature": "scalar", "heat-flux": "vector"}, + "write_data_names": {"porosity": "scalar", "conductivity": "vector"} + }, + "simulation_params": { + "macro_domain_bounds": [0.0, 1.0, 0.0, 1.0, 0.0, 1.0], + }, + "diagnostics": { + "output_micro_sim_solve_time": "True" + } +} +``` + +There are three main sections in the configuration file, the `coupling_params`, the `simulation_params` and the optional `diagnostics`. + +The file containing the Python importable micro simulation class is specified in the `micro_file_name` parameter. + +## Coupling Parameters + +Parameter | Description +--- | --- +`config_file_name` | Path to the preCICE XML configuration file. +`macro_mesh_name` | Name of the macro mesh as stated in the preCICE configuration. +`read_data_names` | A Python dictionary with the names of the data to be read from preCICE as keys and `"scalar"` or `"vector"` as values depending on the nature of the data. +`write_data_names` | A Python dictionary with the names of the data to be written to preCICE as keys and `"scalar"` or `"vector"` as values depending on the nature of the data. + +## Simulation Parameters + +Parameter | Description +--- | --- +`macro_domain_bounds`| Minimum and maximum bounds of the macro-domain, having the format `[xmin, xmax, ymin, ymax, zmin, zmax]` in 3D and `[xmin, xmax, ymin, ymax]` in 2D. 
+*optional:* `micro_output_n`| Frequency of calling the output functionality of the micro simulation in terms of number of time steps. If not given, `micro_sim.output()` is called every time step. +*optional:* Domain decomposition parameters | See section on [Domain decomposition](#domain-decomposition). But default, the Micro Manager assumes that it will be run in serial. +*optional:* Adaptivity parameters | See section on [Adaptivity](#adaptivity). By default, adaptivity is disabled. + +## *Optional*: Diagnostics + +Parameter | Description +--- | --- +`data_from_micro_sims` | A Python dictionary with the names of the data from the micro simulation to be written to VTK files as keys and `"scalar"` or `"vector"` as values. This relies on the [export functionality](configuration-export.html#enabling-exporters) of preCICE and requires the corresponding export tag to be set in the preCICE XML configuration script. +`output_micro_sim_solve_time` | If `True`, the Manager writes the wall clock time of the `solve()` function of each micro simulation to the VTK output. + +An example configuration file can be found in [`examples/micro-manager-config.json`](https://github.com/precice/micro-manager/tree/main/examples/micro-manager-config.json). + +## Domain decomposition + +The Micro Manager can be run in parallel. For a parallel run, set the desired number of paritions in each axis by setting the `axiswise_ranks` variable. For example, if the domain is 3D and the decomposition needs to be two paritions in x, one partition in y, and sixteen partitions in z, the setting is + +```json +"simulation_params": { + "macro_domain_bounds": [0, 1, 0, 1, 0, 1], + "axiswise_ranks": [2, 1, 16] +} +``` + +For a 2D domain, only two values need to be set `axiswise_ranks`. + +## Adaptivity + +The Micro Manager can adaptively control micro simulations. The adaptivity strategy is taken from + +1. Redeker, Magnus & Eck, Christof. (2013). A fast and accurate adaptive solution strategy for two-scale models with continuous inter-scale dependencies. Journal of Computational Physics. 240. 268-283. [10.1016/j.jcp.2012.12.025](https://doi.org/10.1016/j.jcp.2012.12.025). + +2. Bastidas, Manuela & Bringedal, Carina & Pop, Iuliu. (2021). A two-scale iterative scheme for a phase-field model for precipitation and dissolution in porous media. Applied Mathematics and Computation. 396. 125933. [10.1016/j.amc.2020.125933](https://doi.org/10.1016/j.amc.2020.125933). + +To turn on adaptivity, the following options need to be set in `simulation_params`: + +Parameter | Description +--- | --- +`adaptivity` | Set as `True` to turn on adaptivity (`False` by default). +`adaptivity_type` | Set to either `local` or `global`. The type of adaptivity matters when the Micro Manager is run in parallel. `local` means comparing micro simulations within a local partitioned domain for similarity. `global` means comparing micro simulations from all partitions, so over the entire domain. +`adaptivity_data` | List of names of data which are to be used to calculate if micro-simulations are similar or not. For example `["temperature", "porosity"]`. +`adaptivity_history_param` | History parameter $$ \Lambda $$, set as $$ \Lambda >= 0 $$. +`adaptivity_coarsening_constant` | Coarsening constant $$ C_c $$, set as $$ C_c < 1 $$. +`adaptivity_refining_constant` | Refining constant $$ C_r $$, set as $$ C_r >= 0 $$. +`adaptivity_every_implicit_iteration` | If True, adaptivity is calculated in every implicit iteration.
+
+All variables are chosen from the [second publication](https://doi.org/10.1016/j.amc.2020.125933) mentioned above.
+
+Example of adaptivity configuration
+
+```json
+"simulation_params": {
+    "macro_domain_bounds": [0, 1, 0, 1, 0, 1],
+    "adaptivity": "True",
+    "adaptivity_type": "local",
+    "adaptivity_data": ["temperature", "porosity"],
+    "adaptivity_history_param": 0.5,
+    "adaptivity_coarsening_constant": 0.3,
+    "adaptivity_refining_constant": 0.4,
+    "adaptivity_every_implicit_iteration": "True"
+}
+```
+
+If adaptivity is turned on, the Micro Manager will attempt to write two scalar data per micro simulation to preCICE, called `active_state` and `active_steps`.
+
+Parameter | Description
+--- | ---
+`active_state` | `1` if the micro simulation is active in the time window, and `0` if inactive.
+`active_steps` | Summation of `active_state` up to the current time window.
+
+The Micro Manager uses the output functionality of preCICE, hence these data sets need to be added to the preCICE configuration file. In the mesh and the micro participant add the following lines:
+
+```xml
+<data:scalar name="active_state"/>
+<data:scalar name="active_steps"/>
+
+<mesh name="macro-mesh">
+    <use-data name="active_state"/>
+    <use-data name="active_steps"/>
+</mesh>
+
+<participant name="Micro-Manager">
+    <write-data name="active_state" mesh="macro-mesh"/>
+    <write-data name="active_steps" mesh="macro-mesh"/>
+    <export:vtk />
+</participant>
+```
+
+## Next Steps
+
+After creating a configuration file you are ready to [run the Micro Manager](tooling-micro-manager-usage-running.html).
diff --git a/docs/usage-convert-to-library.md b/docs/usage-convert-to-library.md
new file mode 100644
index 00000000..2fbe0e92
--- /dev/null
+++ b/docs/usage-convert-to-library.md
@@ -0,0 +1,100 @@
+---
+title: Convert Your Micro Simulation to Library
+permalink: tooling-micro-manager-usage-code-changes.html
+keywords: tooling, macro-micro, two-scale
+summary: You need to create a Python-importable class from your micro simulation code.
+---
+
+## Steps to convert micro simulation code to a callable library
+
+The Micro Manager requires a specific structure of the micro simulation code. Create a class that can be called from Python with the structure given below. The docstring of each function gives information on what it should do and what its input and output should be.
+
+```python
+class MicroSimulation: # Name is fixed
+
+    def __init__(self):
+        """
+        Constructor of class MicroSimulation. Initialize all class member variables here.
+        """
+
+    def initialize(self) -> dict:
+        """
+        Initialize the micro simulation. This function is *optional*.
+
+        Returns
+        -------
+        data : dict
+            Python dictionary with keys as names of micro data and values as the data at the initial condition
+        """
+
+    def solve(self, macro_data, dt) -> dict:
+        """
+        Solve one time step of the micro simulation or, for steady-state problems, solve until a steady state is reached.
+
+        Parameters
+        ----------
+        macro_data : dict
+            Dictionary with keys as names of macro data and values as the data
+        dt : float
+            Time step size
+
+        Returns
+        -------
+        micro_data : dict
+            Dictionary with keys as names of micro data and values as the updated micro data
+        """
+
+    def save_checkpoint(self):
+        """
+        Save the state of the micro simulation. *Required for implicit coupling*.
+        Save the state internally.
+        """
+
+    def reload_checkpoint(self):
+        """
+        Revert the micro simulation to a previously saved state. *Required for implicit coupling*.
+        """
+
+    def output(self):
+        """
+        This function writes output of the micro simulation in some form.
+        It will be called with the frequency set by the configuration option `simulation_params: micro_output_n`.
+        This function is *optional*.
+ """ +``` + +Skeleton dummy code of a sample MicroSimulation class can be found in the [examples/](https://github.com/precice/micro-manager/tree/main/examples/) directory. There are two variants + +* `examples/python-dummy/`: Dummy micro simulation written in Python +* `examples/cpp-dummy/`: Dummy micro simulation written in C++ and compiled to a Python library using [pybind11](https://pybind11.readthedocs.io/en/stable/) + +### Convert your micro simulation written in C++ to a callable library + +A C++ dummy micro simulation is provided in [`examples/cpp-dummy/`](github.com/precice/micro-manager/tree/main/examples/cpp-dummy). +It uses [pybind11](https://pybind11.readthedocs.io/en/stable/) to compile the C++ code into a library which can be imported in Python. If the micro simulation in C++, [install pybind11](https://pybind11.readthedocs.io/en/stable/installing.html). + +Creating a new micro simulation in C++ has the following steps + +1. Create a C++ class which implements the functions given [above](#steps-to-convert-micro-simulation-code-to-a-callable-library). +The `solve()` function should have the following signature: + + ```cpp + py::dict MicroSimulation::solve(py::dict macro_data, double dt) + ``` + + [`py::dict`](https://pybind11.readthedocs.io/en/stable/advanced/pycpp/object.html?#instantiating-compound-python-types-from-c) is a Python dictionary which can be used to pass data between Python and C++. You need to cast the data to the correct type before using it in C++ and vice versa. An example is given in the dummy micro simulation. + +2. Export the C++ class to Python using pybind11. Follow the instructions to exporting classes in the [pybind11 documentation](https://pybind11.readthedocs.io/en/stable/classes.html) or read their [first steps](https://pybind11.readthedocs.io/en/stable/basics.html) to get started. + +3. Compile the C++ library including pybind11. For the solverdummy, run + + ```bash + c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) + ``` + + This will create a shared library `micro_dummy.so` which can be directly imported in Python. + For more information on compiling C++ libraries, see the [pybind11 documentation](https://pybind11.readthedocs.io/en/stable/compiling.html). + +## Next Steps + +With your code converted to a library, you can now [create a coupling configuration](tooling-micro-manager-usage-configuration.html). diff --git a/docs/usage-running.md b/docs/usage-running.md new file mode 100644 index 00000000..971290fb --- /dev/null +++ b/docs/usage-running.md @@ -0,0 +1,30 @@ +--- +title: Running the Micro Manager +permalink: tooling-micro-manager-usage-running.html +keywords: tooling, macro-micro, two-scale +summary: Run the Micro Manager from the terminal with a configuration file as input argument or from a Python script. +--- + +The Micro Manager is run directly from the terminal by providing the configuration file as an input argument in the following way + +```bash +micro_manager micro-manager-config.json +``` + +Alternatively the Manager can also be run by creating a Python script which imports the Micro Manager package and calls its run function. 
For example a run script `run-micro-manager.py` would look like + +```python +from micro_manager import MicroManager + +manager = MicroManager("micro-manager-config.json") + +manager.initialize() + +manager.solve() +``` + +The Micro Manager can also be run in parallel, using the same script as stated above + +```bash +mpirun -n python3 run-micro-manager.py +``` diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 00000000..93ee1d27 --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,17 @@ +--- +title: Usage +permalink: tooling-micro-manager-usage.html +keywords: tooling, macro-micro, two-scale +--- + +## Using the Micro Manager + +The Micro Manager facilitates two-scale coupling between one macro-scale and many micro-scale simulations. It creates instances of several micro simulations and couples them to one macro simulation, using preCICE. + +An existing micro simulation code needs to be converted into a library with a specific class name which has functions with specific names. The next section describes the required library structure of the micro simulation code. On the other hand, the macro simulation code is coupled to preCICE directly. The section [couple your code](couple-your-code-overview.html) of the preCICE documentation gives more details on coupling existing codes. + +To run your first macro-micro simulation using the Micro Manager follow the three steps: + +1. [Create a micro simulation library](tooling-micro-manager-usage-code-changes.html) +2. [Create a configuration file](tooling-micro-manager-usage-configuration.html) +3. [Run the Micro Manager](tooling-micro-manager-usage-running.html) From 0174fa68f09a1d6a4877e0836ee213854b3fb561 Mon Sep 17 00:00:00 2001 From: erikscheurer <84399192+erikscheurer@users.noreply.github.com> Date: Tue, 30 May 2023 14:00:32 +0200 Subject: [PATCH 45/87] Add update-website workflow (#46) --- .github/workflows/update-website.yml | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 .github/workflows/update-website.yml diff --git a/.github/workflows/update-website.yml b/.github/workflows/update-website.yml new file mode 100644 index 00000000..ecdd6dbe --- /dev/null +++ b/.github/workflows/update-website.yml @@ -0,0 +1,22 @@ +name: Update website +on: + push: + branches: + - 'develop' + paths: + - 'docs/**' +jobs: + trigger: + runs-on: ubuntu-latest + env: + WORKFLOW_FILENAME: update-submodules.yml + steps: + - name: Trigger workflow + run: | + curl \ + --request POST \ + --url https://api.github.com/repos/precice/precice.github.io/actions/workflows/$WORKFLOW_FILENAME/dispatches \ + --header "authorization: token ${{ secrets.WORKFLOW_DISPATCH_TOKEN }}" \ + --header "Accept: application/vnd.github.v3+json" \ + --data '{"ref":"master"}' \ + --fail From 221f16d674015fb676c873e4ce58078e6916bd37 Mon Sep 17 00:00:00 2001 From: erikscheurer <84399192+erikscheurer@users.noreply.github.com> Date: Tue, 30 May 2023 15:52:09 +0200 Subject: [PATCH 46/87] Move to diagnostics (#45) Co-authored-by: Ishaan Desai --- README.md | 1 - docs/usage-configuration.md | 2 +- micro_manager/config.py | 12 ++++++------ tests/unit/micro-manager-unit-test-config.json | 3 +-- 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index 2ca326ac..6ff1c985 100644 --- a/README.md +++ b/README.md @@ -17,5 +17,4 @@ Desai, Ishaan, & Bringedal, Carina & Uekermann, Benjamin. A flexible software ap The Micro Manager can adaptively control micro simulations. The adaptivity strategy is taken from two publications 1. 
Redeker, Magnus & Eck, Christof. (2013). A fast and accurate adaptive solution strategy for two-scale models with continuous inter-scale dependencies. Journal of Computational Physics. 240. 268-283. [10.1016/j.jcp.2012.12.025](https://doi.org/10.1016/j.jcp.2012.12.025). - 2. Bastidas, Manuela & Bringedal, Carina & Pop, Iuliu. (2021). A two-scale iterative scheme for a phase-field model for precipitation and dissolution in porous media. Applied Mathematics and Computation. 396. 125933. [10.1016/j.amc.2020.125933](https://doi.org/10.1016/j.amc.2020.125933). diff --git a/docs/usage-configuration.md b/docs/usage-configuration.md index 456756bb..4412224c 100644 --- a/docs/usage-configuration.md +++ b/docs/usage-configuration.md @@ -43,7 +43,6 @@ Parameter | Description Parameter | Description --- | --- `macro_domain_bounds`| Minimum and maximum bounds of the macro-domain, having the format `[xmin, xmax, ymin, ymax, zmin, zmax]` in 3D and `[xmin, xmax, ymin, ymax]` in 2D. -*optional:* `micro_output_n`| Frequency of calling the output functionality of the micro simulation in terms of number of time steps. If not given, `micro_sim.output()` is called every time step. *optional:* Domain decomposition parameters | See section on [Domain decomposition](#domain-decomposition). But default, the Micro Manager assumes that it will be run in serial. *optional:* Adaptivity parameters | See section on [Adaptivity](#adaptivity). By default, adaptivity is disabled. @@ -53,6 +52,7 @@ Parameter | Description --- | --- `data_from_micro_sims` | A Python dictionary with the names of the data from the micro simulation to be written to VTK files as keys and `"scalar"` or `"vector"` as values. This relies on the [export functionality](configuration-export.html#enabling-exporters) of preCICE and requires the corresponding export tag to be set in the preCICE XML configuration script. `output_micro_sim_solve_time` | If `True`, the Manager writes the wall clock time of the `solve()` function of each micro simulation to the VTK output. +`micro_output_n`| Frequency of calling the optional output functionality of the micro simulation in terms of number of time steps. If not given, `micro_sim.output()` is called every time step. An example configuration file can be found in [`examples/micro-manager-config.json`](https://github.com/precice/micro-manager/tree/main/examples/micro-manager-config.json). diff --git a/micro_manager/config.py b/micro_manager/config.py index 9895a21b..e4437ccc 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -99,12 +99,6 @@ def read_json(self, config_filename): except BaseException: print("Domain decomposition is not specified, so the Micro Manager will expect to be run in serial.") - try: - self._micro_output_n = data["simulation_params"]["micro_output_n"] - except BaseException: - print("Output interval of micro simulations not specified, if output is available then it will be called " - "in every time window.") - try: if data["simulation_params"]["adaptivity"] == "True": self._adaptivity = True @@ -156,6 +150,12 @@ def read_json(self, config_filename): except BaseException: print("No diagnostics data is defined. 
Micro Manager will not output any diagnostics data.") + try: + self._micro_output_n = data["diagnostics"]["micro_output_n"] + except BaseException: + print("Output interval of micro simulations not specified, if output is available then it will be called " + "in every time window.") + try: if data["diagnostics"]["output_micro_sim_solve_time"]: self._output_micro_sim_time = True diff --git a/tests/unit/micro-manager-unit-test-config.json b/tests/unit/micro-manager-unit-test-config.json index bddd71cd..222610b6 100644 --- a/tests/unit/micro-manager-unit-test-config.json +++ b/tests/unit/micro-manager-unit-test-config.json @@ -8,7 +8,6 @@ }, "simulation_params": { "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0], - "micro_output_n": 10, "adaptivity": "True", "adaptivity_type": "local", "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], @@ -16,9 +15,9 @@ "adaptivity_coarsening_constant": 0.3, "adaptivity_refining_constant": 0.4, "adaptivity_every_implicit_iteration": "False" - }, "diagnostics": { + "micro_output_n": 10, "output_micro_sim_solve_time": "True" } } From 168b4fbeafd5326bd19242ba2800884ee067d71b Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 31 May 2023 14:38:44 +0200 Subject: [PATCH 47/87] Correct permalinks in documentation and also simplify names (#47) --- README.md | 4 ++-- docs/README.md | 7 +++---- docs/{usage-configuration.md => configuration.md} | 2 +- ...o-library.md => micro-simulation-convert-to-library.md} | 2 +- docs/{usage-running.md => running.md} | 2 +- 5 files changed, 8 insertions(+), 9 deletions(-) rename docs/{usage-configuration.md => configuration.md} (99%) rename docs/{usage-convert-to-library.md => micro-simulation-convert-to-library.md} (98%) rename docs/{usage-running.md => running.md} (94%) diff --git a/README.md b/README.md index 6ff1c985..feeb0525 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ A tool to facilitate solving two-scale (macro-micro) coupled problems using the ## Start Here -The main documentation is on the [preCICE website](https://precice.org/) TODO: Update to link of documentation. +The main documentation is on the [preCICE website](https://precice.org/tooling-micro-manager-overview.html). Please report any bugs and issues [here](https://github.com/precice/micro-manager/issues) and give us feedback through [one of our community channels](https://precice.org/community-channels.html). @@ -12,7 +12,7 @@ Please report any bugs and issues [here](https://github.com/precice/micro-manage The concept and initial design of the Micro Manager has been discussed in -Desai, Ishaan, & Bringedal, Carina & Uekermann, Benjamin. A flexible software approach to simulate two-scale coupled problems. ECCOMAS Congress 2022. [10.23967/eccomas.2022.037](https://doi.org/10.23967/eccomas.2022.037) +Desai, Ishaan, & Bringedal, Carina & Uekermann, Benjamin. A flexible software approach to simulate two-scale coupled problems. ECCOMAS Congress 2022. [10.23967/eccomas.2022.037](https://doi.org/10.23967/eccomas.2022.037). The Micro Manager can adaptively control micro simulations. 
The adaptivity strategy is taken from two publications diff --git a/docs/README.md b/docs/README.md index 4f4a3d1e..144771d3 100644 --- a/docs/README.md +++ b/docs/README.md @@ -22,7 +22,6 @@ The Micro Manager is able to couple many micro simulations with one macro simula For a more detailed look, the documentation is split into the following sections: - [Installation](tooling-micro-manager-installation.html) -- [Usage](tooling-micro-manager-usage.html) - - [Code Changes](tooling-micro-manager-code-changes.html) - - [Configuration](tooling-micro-manager-configuration.html) - - [Running](tooling-micro-manager-running.html) +- [Micro simulation as callable library](tooling-micro-manager-micro-simulation-callable-library.html) +- [Configuration](tooling-micro-manager-configuration.html) +- [Running](tooling-micro-manager-running.html) diff --git a/docs/usage-configuration.md b/docs/configuration.md similarity index 99% rename from docs/usage-configuration.md rename to docs/configuration.md index 4412224c..fe882ee0 100644 --- a/docs/usage-configuration.md +++ b/docs/configuration.md @@ -1,6 +1,6 @@ --- title: Configuration of the Micro Manager -permalink: tooling-micro-manager-usage-configuration.html +permalink: tooling-micro-manager-configuration.html keywords: tooling, macro-micro, two-scale summary: The Micro Manager uses a JSON file to configure the coupling. The coupled data has to be specified in the preCICE configuration file. --- diff --git a/docs/usage-convert-to-library.md b/docs/micro-simulation-convert-to-library.md similarity index 98% rename from docs/usage-convert-to-library.md rename to docs/micro-simulation-convert-to-library.md index 2fbe0e92..c4005104 100644 --- a/docs/usage-convert-to-library.md +++ b/docs/micro-simulation-convert-to-library.md @@ -1,6 +1,6 @@ --- title: Convert Your Micro Simulation to Library -permalink: tooling-micro-manager-usage-code-changes.html +permalink: tooling-micro-manager-micro-simulation-callable-library.html keywords: tooling, macro-micro, two-scale summary: You need to create an Python-importable class from your micro simulation code. --- diff --git a/docs/usage-running.md b/docs/running.md similarity index 94% rename from docs/usage-running.md rename to docs/running.md index 971290fb..157e6a0f 100644 --- a/docs/usage-running.md +++ b/docs/running.md @@ -1,6 +1,6 @@ --- title: Running the Micro Manager -permalink: tooling-micro-manager-usage-running.html +permalink: tooling-micro-manager-running.html keywords: tooling, macro-micro, two-scale summary: Run the Micro Manager from the terminal with a configuration file as input argument or from a Python script. --- From 80a3e31e37338bf49d6ef744b75c7255c397fe11 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 31 May 2023 15:54:17 +0200 Subject: [PATCH 48/87] Modify docs/README.md --- docs/README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/README.md b/docs/README.md index 144771d3..4d1f4c0f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,24 +2,24 @@ title: The Micro Manager permalink: tooling-micro-manager-overview.html keywords: tooling, macro-micro, two-scale -summary: The Micro Manager is a tool to facilitate solving two-scale (macro-micro) coupled problems using the coupling library preCICE. +summary: The Micro Manager is a tool for solving two-scale (macro-micro) coupled problems using the coupling library preCICE. --- ## What is this? 
-The Micro Manager is a tool to facilitate solving two-scale (macro-micro) coupled problems using the coupling library [preCICE](https://www.precice.org/). +The Micro Manager is a tool for solving coupled problems where the coupling is across scales (for example macro-micro). It is developed as a library extension to the coupling library [preCICE](https://www.precice.org/). ## What can it do? -The Micro Manager is able to couple many micro simulations with one macro simulation. This includes: +The Micro Manager couples many micro simulations with one macro simulation. This includes -- Passing data between micro and macro simulations -- Running micro simulations in parallel using MPI -- Adaptively activating and deactivating micro simulations based on whether their similar exist +- transferring scalar and vector data to and from a large number of micro simulations. +- running micro simulations in parallel using MPI. +- adaptively activating and deactivating micro simulations based on whether their similar exist. ## Documentation -For a more detailed look, the documentation is split into the following sections: +For a more detailed look, the documentation is split into the following sections - [Installation](tooling-micro-manager-installation.html) - [Micro simulation as callable library](tooling-micro-manager-micro-simulation-callable-library.html) From ee66d936d24b5873e99693c55c4be6f58ba4b138 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 31 May 2023 21:05:20 +0200 Subject: [PATCH 49/87] Add content of usage.md into README.md --- docs/README.md | 4 +++- docs/usage.md | 17 ----------------- 2 files changed, 3 insertions(+), 18 deletions(-) delete mode 100644 docs/usage.md diff --git a/docs/README.md b/docs/README.md index 4d1f4c0f..5c1fda8f 100644 --- a/docs/README.md +++ b/docs/README.md @@ -19,7 +19,9 @@ The Micro Manager couples many micro simulations with one macro simulation. This ## Documentation -For a more detailed look, the documentation is split into the following sections +The Micro Manager creates instances of several micro simulations and couples them to one macro simulation, using preCICE. + +An existing micro simulation code needs to be converted into a library with a specific class name which has functions with specific names. For a macro-micro coupled problem, the macro simulation code is coupled to preCICE directly. The section [couple your code](couple-your-code-overview.html) of the preCICE documentation gives more details on coupling existing codes. To setup a macro-micro coupled simulation using the Micro Manager, follow the steps - [Installation](tooling-micro-manager-installation.html) - [Micro simulation as callable library](tooling-micro-manager-micro-simulation-callable-library.html) diff --git a/docs/usage.md b/docs/usage.md deleted file mode 100644 index 93ee1d27..00000000 --- a/docs/usage.md +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: Usage -permalink: tooling-micro-manager-usage.html -keywords: tooling, macro-micro, two-scale ---- - -## Using the Micro Manager - -The Micro Manager facilitates two-scale coupling between one macro-scale and many micro-scale simulations. It creates instances of several micro simulations and couples them to one macro simulation, using preCICE. - -An existing micro simulation code needs to be converted into a library with a specific class name which has functions with specific names. The next section describes the required library structure of the micro simulation code. 
On the other hand, the macro simulation code is coupled to preCICE directly. The section [couple your code](couple-your-code-overview.html) of the preCICE documentation gives more details on coupling existing codes. - -To run your first macro-micro simulation using the Micro Manager follow the three steps: - -1. [Create a micro simulation library](tooling-micro-manager-usage-code-changes.html) -2. [Create a configuration file](tooling-micro-manager-usage-configuration.html) -3. [Run the Micro Manager](tooling-micro-manager-usage-running.html) From 295d0f9f9c57619c81a8f66f52e06f69e111f109 Mon Sep 17 00:00:00 2001 From: erikscheurer <84399192+erikscheurer@users.noreply.github.com> Date: Thu, 1 Jun 2023 12:21:58 +0200 Subject: [PATCH 50/87] Add user option for similarity measures (#40) * Add options for similarity functions and L2 measure * Change l2 norm * Undo some config changes * Add relative L1 and L2 distances * Convert to only one call to get similarity measure Co-authored-by: Ishaan Desai * Move similarity measures in separate functions * Add return documentation of get_similarity_measure * Change division to maximum and add tests * Move tests to `test_adaptivity` * Change test json path * Update CHANGELOG.md * Fix file name in adaptivity test * Formatting * Update json test path * Add documentation on similarity measures * Add documentation on the similarity measures * Update CHANGELOG.md Co-authored-by: Ishaan Desai * Update docs/usage-configuration.md Co-authored-by: Ishaan Desai * Add comment on increasing the dimension --------- Co-authored-by: Ishaan Desai --- CHANGELOG.md | 3 +- docs/configuration.md | 1 + examples/micro-manager-adaptivity-config.json | 2 +- micro_manager/adaptivity/adaptivity.py | 131 +++++++++++++++--- micro_manager/config.py | 17 +++ ...o-manager-unit-test-adaptivity-config.json | 3 +- tests/unit/test_adaptivity.py | 23 +++ 7 files changed, 157 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c8a700a..8461eb15 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## latest +- Add norm-based (L1 and L2) support for functions in similarity distance calculation with absolute and relative variants https://github.com/precice/micro-manager/pull/40 - New domain decomposition strategy based on user input of number of processors along each axis https://github.com/precice/micro-manager/pull/41 - Add pickling support for C++ solver dummy https://github.com/precice/micro-manager/pull/30 - Add C++ solver dummy to show how a C++ micro simulation can be controlled by the Micro Manager https://github.com/precice/micro-manager/pull/22 @@ -9,7 +10,7 @@ ## v0.2.1 -- Fixing the broken action workflow `run-macro-micro-dummy` +- Fixing the broken action workflow `run-macro-micro-dummy` ## v0.2.0 diff --git a/docs/configuration.md b/docs/configuration.md index fe882ee0..f3a7e68e 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -43,6 +43,7 @@ Parameter | Description Parameter | Description --- | --- `macro_domain_bounds`| Minimum and maximum bounds of the macro-domain, having the format `[xmin, xmax, ymin, ymax, zmin, zmax]` in 3D and `[xmin, xmax, ymin, ymax]` in 2D. +*optional:* `adaptivity_similarity_measure`| Similarity measure to be used for adaptivity. Can be either `L1`, `L2`, `L1rel` or `L2rel`. By default, `L1` is used. The `rel` variants calculate the respective relative norms. *optional:* Domain decomposition parameters | See section on [Domain decomposition](#domain-decomposition). 
But default, the Micro Manager assumes that it will be run in serial. *optional:* Adaptivity parameters | See section on [Adaptivity](#adaptivity). By default, adaptivity is disabled. diff --git a/examples/micro-manager-adaptivity-config.json b/examples/micro-manager-adaptivity-config.json index 68de39fc..fffc488e 100644 --- a/examples/micro-manager-adaptivity-config.json +++ b/examples/micro-manager-adaptivity-config.json @@ -9,7 +9,7 @@ "simulation_params": { "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0], "adaptivity": "True", - "adaptivity_type": "local", + "adaptivity_type": "local", "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], "adaptivity_history_param": 0.5, "adaptivity_coarsening_constant": 0.3, diff --git a/micro_manager/adaptivity/adaptivity.py b/micro_manager/adaptivity/adaptivity.py index fbaa46cd..32eea7d6 100644 --- a/micro_manager/adaptivity/adaptivity.py +++ b/micro_manager/adaptivity/adaptivity.py @@ -2,6 +2,7 @@ Functionality for adaptive initialization and control of micro simulations """ import numpy as np +from typing import Callable class AdaptivityCalculator: @@ -15,6 +16,8 @@ def __init__(self, configurator, global_ids) -> None: # Use set to make the "in" functionality faster for large lists self._global_ids_of_local_sims = global_ids + self._similarity_measure = self._get_similarity_measure(configurator.get_adaptivity_similarity_measure()) + def get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: np.ndarray) -> np.ndarray: """ Calculate metric which determines if two micro simulations are similar enough to have one of them deactivated. @@ -22,7 +25,7 @@ def get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: np Parameters ---------- dt : float - Time step + Timestep similarity_dists : numpy array 2D array having similarity distances between each micro simulation pair data : numpy array @@ -36,25 +39,13 @@ def get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: np _similarity_dists = np.copy(similarity_dists) if data.ndim == 1: - dim = 0 - elif data.ndim == 2: - _, dim = data.shape - - number_of_sims, _ = _similarity_dists.shape - - for counter_1, id_1 in enumerate(range(number_of_sims)): - for counter_2, id_2 in enumerate(range(number_of_sims)): - data_diff = 0 - if id_1 != id_2: - if dim: - for d in range(dim): - data_diff += abs(data[counter_1, d] - data[counter_2, d]) - else: - data_diff = abs(data[counter_1] - data[counter_2]) - - _similarity_dists[id_1, id_2] += dt * data_diff - else: - _similarity_dists[id_1, id_2] = 0 + # If the adaptivity-data is a scalar for each simulation, + # expand the dimension to make it a 2D array to unify the calculation. + # The axis is later reduced with a norm. + data = np.expand_dims(data, axis=1) + + data_diff = self._similarity_measure(data) + _similarity_dists += dt * data_diff return _similarity_dists @@ -107,3 +98,103 @@ def _check_for_deactivation( if similarity_dists[active_id, active_id_2] < self._coarse_tol: return True return False + + def _get_similarity_measure(self, similarity_measure: str) -> Callable[[np.ndarray], np.ndarray]: + """ + Get similarity measure to be used for similarity calculation + + Parameters + ---------- + similarity_measure : str + String specifying the similarity measure to be used + + Returns + ------- + similarity_measure : function + Function to be used for similarity calculation. 
Takes data as input and returns similarity measure + """ + if similarity_measure == 'L1': + return self._l1 + elif similarity_measure == 'L2': + return self._l2 + elif similarity_measure == 'L1rel': + return self._l1rel + elif similarity_measure == 'L2rel': + return self._l2rel + else: + raise ValueError( + 'Similarity measure not supported. Currently supported similarity measures are "L1", "L2", "L1rel", "L2rel".') + + def _l1(self, data: np.ndarray) -> np.ndarray: + """ + Calculate L1 norm of data + + Parameters + ---------- + data : numpy array + Data to be used in similarity distance calculation + + Returns + ------- + similarity_dists : numpy array + Updated 2D array having similarity distances between each micro simulation pair + """ + return np.linalg.norm(data[np.newaxis, :] - data[:, np.newaxis], ord=1, axis=-1) + + def _l2(self, data: np.ndarray) -> np.ndarray: + """ + Calculate L2 norm of data + + Parameters + ---------- + data : numpy array + Data to be used in similarity distance calculation + + Returns + ------- + similarity_dists : numpy array + Updated 2D array having similarity distances between each micro simulation pair + """ + return np.linalg.norm(data[np.newaxis, :] - data[:, np.newaxis], ord=2, axis=-1) + + def _l1rel(self, data: np.ndarray) -> np.ndarray: + """ + Calculate L1 norm of relative difference of data. + The relative difference is calculated by dividing the difference of two data points by the maximum of the two data points. + + Parameters + ---------- + data : numpy array + Data to be used in similarity distance calculation + + Returns + ------- + similarity_dists : numpy array + Updated 2D array having similarity distances between each micro simulation pair + """ + pointwise_diff = data[np.newaxis, :] - data[:, np.newaxis] + # divide by data to get relative difference + # divide i,j by max(data[i],data[j]) to get relative difference + relative = np.nan_to_num((pointwise_diff / np.maximum(data[np.newaxis, :], data[:, np.newaxis]))) + return np.linalg.norm(relative, ord=1, axis=-1) + + def _l2rel(self, data: np.ndarray) -> np.ndarray: + """ + Calculate L2 norm of relative difference of data. + The relative difference is calculated by dividing the difference of two data points by the maximum of the two data points. 
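+        If both values of a pair are zero, the division gives NaN, which is mapped to zero by numpy.nan_to_num.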
+ + Parameters + ---------- + data : numpy array + Data to be used in similarity distance calculation + + Returns + ------- + similarity_dists : numpy array + Updated 2D array having similarity distances between each micro simulation pair + """ + pointwise_diff = data[np.newaxis, :] - data[:, np.newaxis] + # divide by data to get relative difference + # divide i,j by max(data[i],data[j]) to get relative difference + relative = np.nan_to_num((pointwise_diff / np.maximum(data[np.newaxis, :], data[:, np.newaxis]))) + return np.linalg.norm(relative, ord=2, axis=-1) diff --git a/micro_manager/config.py b/micro_manager/config.py index e4437ccc..15cecfc7 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -43,6 +43,7 @@ def __init__(self, config_filename): self._adaptivity_coarsening_constant = 0.5 self._adaptivity_refining_constant = 0.5 self._adaptivity_every_implicit_iteration = False + self._adaptivity_similarity_measure = "L1" self.read_json(config_filename) @@ -124,6 +125,13 @@ def read_json(self, config_filename): self._adaptivity_history_param = data["simulation_params"]["adaptivity_history_param"] self._adaptivity_coarsening_constant = data["simulation_params"]["adaptivity_coarsening_constant"] self._adaptivity_refining_constant = data["simulation_params"]["adaptivity_refining_constant"] + + if "adaptivity_similarity_measure" in data["simulation_params"]: + self._adaptivity_similarity_measure = data["simulation_params"]["adaptivity_similarity_measure"] + else: + print("No similarity measure provided, using L1 norm as default") + self._adaptivity_similarity_measure = "L1" + adaptivity_every_implicit_iteration = data["simulation_params"]["adaptivity_every_implicit_iteration"] if adaptivity_every_implicit_iteration == "True": @@ -321,6 +329,15 @@ def get_adaptivity_refining_const(self): """ return self._adaptivity_refining_constant + def get_adaptivity_similarity_measure(self): + """ + + Returns + ------- + + """ + return self._adaptivity_similarity_measure + def is_adaptivity_required_in_every_implicit_iteration(self): """ diff --git a/tests/unit/micro-manager-unit-test-adaptivity-config.json b/tests/unit/micro-manager-unit-test-adaptivity-config.json index e629573f..0bbe3abd 100644 --- a/tests/unit/micro-manager-unit-test-adaptivity-config.json +++ b/tests/unit/micro-manager-unit-test-adaptivity-config.json @@ -14,6 +14,7 @@ "adaptivity_history_param": 0.5, "adaptivity_coarsening_constant": 0.3, "adaptivity_refining_constant": 0.4, - "adaptivity_every_implicit_iteration": "False" + "adaptivity_every_implicit_iteration": "False", + "adaptivity_similarity_measure": "L1" } } diff --git a/tests/unit/test_adaptivity.py b/tests/unit/test_adaptivity.py index 08b8e76d..fab61063 100644 --- a/tests/unit/test_adaptivity.py +++ b/tests/unit/test_adaptivity.py @@ -1,5 +1,6 @@ from unittest import TestCase from micro_manager.adaptivity.local_adaptivity import LocalAdaptivityCalculator +from micro_manager.adaptivity.adaptivity import AdaptivityCalculator from micro_manager.config import Config import numpy as np @@ -170,3 +171,25 @@ def get_associated_active_local_id(self): self.assertEqual(dummy_micro_sims[0].get_associated_active_local_id(), 2) self.assertEqual(dummy_micro_sims[1].get_associated_active_local_id(), 2) self.assertEqual(dummy_micro_sims[3].get_associated_active_local_id(), 4) + + def test_adaptivity_norms(self): + c = Config('micro-manager-unit-test-adaptivity-config.json') + calc = AdaptivityCalculator(c, [0, 1, 2, 3, 4]) + + fake_data = np.array([[1], [2], [3]]) + 
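+        # For scalar data [1, 2, 3] the pairwise L1 and L2 distances coincide:
+        # |1-2| = 1, |1-3| = 2, |2-3| = 1, giving the symmetric matrices asserted below.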
self.assertTrue(np.allclose(calc._l1(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) + # norm taken over last axis -> same as before + self.assertTrue(np.allclose(calc._l2(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) + self.assertTrue(np.allclose(calc._l1rel(fake_data), np.array( + [[0, 0.5, 2 / 3], [0.5, 0, 1 / 3], [2 / 3, 1 / 3, 0]]))) + self.assertTrue(np.allclose(calc._l2rel(fake_data), np.array( + [[0, 0.5, 2 / 3], [0.5, 0, 1 / 3], [2 / 3, 1 / 3, 0]]))) + + fake_2d_data = np.array([[1, 2], [3, 4]]) + self.assertTrue(np.allclose(calc._l1(fake_2d_data), np.array([[0, 4], [4, 0]]))) + self.assertTrue(np.allclose(calc._l2(fake_2d_data), np.array([[0, np.sqrt((1 - 3)**2 + (2 - 4)**2)], + [np.sqrt((1 - 3)**2 + (2 - 4)**2), 0]]))) + self.assertTrue(np.allclose(calc._l1rel(fake_2d_data), np.array( + [[0, abs((1 - 3) / max(1, 3) + (2 - 4) / max(2, 4))], [abs((1 - 3) / max(1, 3) + (2 - 4) / max(2, 4)), 0]]))) + self.assertTrue(np.allclose(calc._l2rel(fake_2d_data), np.array([[0, np.sqrt( + (1 - 3)**2 / max(1, 3)**2 + (2 - 4)**2 / max(2, 4)**2)], [np.sqrt((1 - 3)**2 / max(1, 3)**2 + (2 - 4)**2 / max(2, 4)**2), 0]]))) From b04dadb9ba5bfb49a768edd906b411cb27db391c Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Thu, 1 Jun 2023 13:41:13 +0200 Subject: [PATCH 51/87] Create sub-heading adaptivity under simulation_params to shorten config variable names (#49) * Create sub-heading adaptivity under simulation_params to shorten the configuration variable names * Change adaptivity settings to the new format in unit tests config * Remove unnecessary underscore * Change adaptivity settings to the new format in solver dummy configs * Remove adaptivity: False config entries --- docs/configuration.md | 32 ++++++++++--------- examples/micro-manager-adaptivity-config.json | 15 +++++---- examples/micro-manager-config.json | 3 +- micro_manager/config.py | 24 +++++++------- .../micro-manager-config-adaptivity.json | 15 +++++---- .../micro-manager-config-parallel-1.json | 3 +- .../micro-manager-config-parallel-2.json | 3 +- ...o-manager-unit-test-adaptivity-config.json | 17 +++++----- .../unit/micro-manager-unit-test-config.json | 15 +++++---- 9 files changed, 64 insertions(+), 63 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index f3a7e68e..0767c9e3 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -72,23 +72,24 @@ For a 2D domain, only two values need to be set `axiswise_ranks`. ## Adaptivity +{% note %} This feature is optional. {% endnote %} + The Micro Manager can adaptively control micro simulations. The adaptivity strategy is taken from 1. Redeker, Magnus & Eck, Christof. (2013). A fast and accurate adaptive solution strategy for two-scale models with continuous inter-scale dependencies. Journal of Computational Physics. 240. 268-283. [10.1016/j.jcp.2012.12.025](https://doi.org/10.1016/j.jcp.2012.12.025). 2. Bastidas, Manuela & Bringedal, Carina & Pop, Iuliu. (2021). A two-scale iterative scheme for a phase-field model for precipitation and dissolution in porous media. Applied Mathematics and Computation. 396. 125933. [10.1016/j.amc.2020.125933](https://doi.org/10.1016/j.amc.2020.125933). -To turn on adaptivity, the following options need to be set in `simulation_params`: +To turn on adaptivity, the following options need to be set in `simulation_params` under the sub-heading `adaptivity`: Parameter | Description --- | --- -`adaptivity` | Set as `True` to turn on adaptivity (`False` by default). 
-`adaptivity_type` | Set to either `local` or `global`. The type of adaptivity matters when the Micro Manager is run in parallel. `local` means comparing micro simulations within a local partitioned domain for similarity. `global` means comparing micro simulations from all partitions, so over the entire domain. -`adaptivity_data` | List of names of data which are to be used to calculate if micro-simulations are similar or not. For example `["temperature", "porosity"]`. -`adaptivity_history_param` | History parameter $$ \Lambda $$, set as $$ \Lambda >= 0 $$. -`adaptivity_coarsening_constant` | Coarsening constant $$ C_c $$, set as $$ C_c < 1 $$. -`adaptivity_refining_constant` | Refining constant $$ C_r $$, set as $$ C_r >= 0 $$. -`adaptivity_every_implicit_iteration` | If True, adaptivity is calculated in every implicit iteration.
If False, adaptivity is calculated once at the start of the time window and then reused in every implicit time iteration. +`type` | Set to either `local` or `global`. The type of adaptivity matters when the Micro Manager is run in parallel. `local` means comparing micro simulations within a local partitioned domain for similarity. `global` means comparing micro simulations from all partitions, so over the entire domain. +`data` | List of names of data which are to be used to calculate if micro-simulations are similar or not. For example `["temperature", "porosity"]`. +`history_param` | History parameter $$ \Lambda $$, set as $$ \Lambda >= 0 $$. +`coarsening_constant` | Coarsening constant $$ C_c $$, set as $$ C_c < 1 $$. +`refining_constant` | Refining constant $$ C_r $$, set as $$ C_r >= 0 $$. +` If False, adaptivity is calculated once at the start of the time window and then reused in every implicit time iteration. All variables are chosen from the [second publication](https://doi.org/10.1016/j.amc.2020.125933) mentioned above. @@ -97,13 +98,14 @@ Example of adaptivity configuration ```json "simulation_params": { "macro_domain_bounds": [0, 1, 0, 1, 0, 1], - "adaptivity": "True", - "adaptivity_type": "local", - "adaptivity_data": ["temperature", "porosity"], - "adaptivity_history_param": 0.5, - "adaptivity_coarsening_constant": 0.3, - "adaptivity_refining_constant": 0.4, - "adaptivity_every_implicit_iteration": "True" + "adaptivity" { + "type": "local", + "data": ["temperature", "porosity"], + "history_param": 0.5, + "coarsening_constant": 0.3, + "refining_constant": 0.4, + "every_implicit_iteration": "True" + } } ``` diff --git a/examples/micro-manager-adaptivity-config.json b/examples/micro-manager-adaptivity-config.json index fffc488e..76a7fd78 100644 --- a/examples/micro-manager-adaptivity-config.json +++ b/examples/micro-manager-adaptivity-config.json @@ -8,13 +8,14 @@ }, "simulation_params": { "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0], - "adaptivity": "True", - "adaptivity_type": "local", - "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], - "adaptivity_history_param": 0.5, - "adaptivity_coarsening_constant": 0.3, - "adaptivity_refining_constant": 0.4, - "adaptivity_every_implicit_iteration": "True" + "adaptivity": { + "type": "local", + "data": ["macro-scalar-data", "macro-vector-data"], + "history_param": 0.5, + "coarsening_constant": 0.3, + "refining_constant": 0.4, + "every_implicit_iteration": "True" + } }, "diagnostics": { "output_micro_sim_solve_time": "True" diff --git a/examples/micro-manager-config.json b/examples/micro-manager-config.json index fd52c953..4be15a61 100644 --- a/examples/micro-manager-config.json +++ b/examples/micro-manager-config.json @@ -7,8 +7,7 @@ "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} }, "simulation_params": { - "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0], - "adaptivity": "False" + "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0] }, "diagnostics": { "output_micro_sim_solve_time": "True" diff --git a/micro_manager/config.py b/micro_manager/config.py index 15cecfc7..7d292326 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -101,38 +101,36 @@ def read_json(self, config_filename): print("Domain decomposition is not specified, so the Micro Manager will expect to be run in serial.") try: - if data["simulation_params"]["adaptivity"] == "True": + if data["simulation_params"]["adaptivity"]: self._adaptivity = True - elif 
data["simulation_params"]["adaptivity"] == "False": - self._adaptivity = False else: - raise Exception("Adaptivity can be either True or False.") + self._adaptivity = False except BaseException: print("Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations in all time steps.") if self._adaptivity: - if data["simulation_params"]["adaptivity_type"] == "local": + if data["simulation_params"]["adaptivity"]["type"] == "local": self._adaptivity_type = "local" - elif data["simulation_params"]["adaptivity_type"] == "global": + elif data["simulation_params"]["adaptivity"]["type"] == "global": self._adaptivity_type = "global" else: raise Exception("Adaptivity type can be either local or global.") exchange_data = {**self._read_data_names, **self._write_data_names} - for dname in data["simulation_params"]["adaptivity_data"]: + for dname in data["simulation_params"]["adaptivity"]["data"]: self._data_for_adaptivity[dname] = exchange_data[dname] - self._adaptivity_history_param = data["simulation_params"]["adaptivity_history_param"] - self._adaptivity_coarsening_constant = data["simulation_params"]["adaptivity_coarsening_constant"] - self._adaptivity_refining_constant = data["simulation_params"]["adaptivity_refining_constant"] + self._adaptivity_history_param = data["simulation_params"]["adaptivity"]["history_param"] + self._adaptivity_coarsening_constant = data["simulation_params"]["adaptivity"]["coarsening_constant"] + self._adaptivity_refining_constant = data["simulation_params"]["adaptivity"]["refining_constant"] - if "adaptivity_similarity_measure" in data["simulation_params"]: - self._adaptivity_similarity_measure = data["simulation_params"]["adaptivity_similarity_measure"] + if "similarity_measure" in data["simulation_params"]["adaptivity"]: + self._adaptivity_similarity_measure = data["simulation_params"]["adaptivity"]["similarity_measure"] else: print("No similarity measure provided, using L1 norm as default") self._adaptivity_similarity_measure = "L1" - adaptivity_every_implicit_iteration = data["simulation_params"]["adaptivity_every_implicit_iteration"] + adaptivity_every_implicit_iteration = data["simulation_params"]["adaptivity"]["every_implicit_iteration"] if adaptivity_every_implicit_iteration == "True": self._adaptivity_every_implicit_iteration = True diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-adaptivity.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-adaptivity.json index 9cfa6e6b..99392c61 100644 --- a/tests/integration/test_unit_cube_dummy/micro-manager-config-adaptivity.json +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-adaptivity.json @@ -8,13 +8,14 @@ }, "simulation_params": { "macro_domain_bounds": [0, 1, 0, 1, 0, 1], - "adaptivity": "True", - "adaptivity_type": "local", - "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], - "adaptivity_history_param": 0.5, - "adaptivity_coarsening_constant": 0.3, - "adaptivity_refining_constant": 0.4, - "adaptivity_every_implicit_iteration": "True" + "adaptivity": { + "type": "local", + "data": ["macro-scalar-data", "macro-vector-data"], + "history_param": 0.5, + "coarsening_constant": 0.3, + "refining_constant": 0.4, + "every_implicit_iteration": "True" + } }, "diagnostics": { "output_micro_sim_solve_time": "True" diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json index 4e62524b..8e71edcf 100644 --- 
a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json @@ -8,8 +8,7 @@ }, "simulation_params": { "macro_domain_bounds": [0, 1, 0, 1, 0, 1], - "axiswise_ranks": [1, 1, 2], - "adaptivity": "False" + "axiswise_ranks": [1, 1, 2] }, "diagnostics": { "output_micro_sim_solve_time": "True" diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json index 994bedd2..487ae129 100644 --- a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json @@ -8,8 +8,7 @@ }, "simulation_params": { "macro_domain_bounds": [0, 1, 0, 1, 0, 1], - "axiswise_ranks": [1, 2, 3], - "adaptivity": "False" + "axiswise_ranks": [1, 2, 3] }, "diagnostics": { "output_micro_sim_solve_time": "True" diff --git a/tests/unit/micro-manager-unit-test-adaptivity-config.json b/tests/unit/micro-manager-unit-test-adaptivity-config.json index 0bbe3abd..cc04492b 100644 --- a/tests/unit/micro-manager-unit-test-adaptivity-config.json +++ b/tests/unit/micro-manager-unit-test-adaptivity-config.json @@ -8,13 +8,14 @@ }, "simulation_params": { "macro_domain_bounds": [], - "adaptivity": "True", - "adaptivity_type": "local", - "adaptivity_data": [], - "adaptivity_history_param": 0.5, - "adaptivity_coarsening_constant": 0.3, - "adaptivity_refining_constant": 0.4, - "adaptivity_every_implicit_iteration": "False", - "adaptivity_similarity_measure": "L1" + "adaptivity": { + "type": "local", + "data": [], + "history_param": 0.5, + "coarsening_constant": 0.3, + "refining_constant": 0.4, + "every_implicit_iteration": "False", + "similarity_measure": "L1" + } } } diff --git a/tests/unit/micro-manager-unit-test-config.json b/tests/unit/micro-manager-unit-test-config.json index 222610b6..ccc829d7 100644 --- a/tests/unit/micro-manager-unit-test-config.json +++ b/tests/unit/micro-manager-unit-test-config.json @@ -8,13 +8,14 @@ }, "simulation_params": { "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0], - "adaptivity": "True", - "adaptivity_type": "local", - "adaptivity_data": ["macro-scalar-data", "macro-vector-data"], - "adaptivity_history_param": 0.5, - "adaptivity_coarsening_constant": 0.3, - "adaptivity_refining_constant": 0.4, - "adaptivity_every_implicit_iteration": "False" + "adaptivity": { + "type": "local", + "data": ["macro-scalar-data", "macro-vector-data"], + "history_param": 0.5, + "coarsening_constant": 0.3, + "refining_constant": 0.4, + "every_implicit_iteration": "False" + } }, "diagnostics": { "micro_output_n": 10, From fca22f80d51a1e310b189b8383d7bd6010688e0f Mon Sep 17 00:00:00 2001 From: erikscheurer <84399192+erikscheurer@users.noreply.github.com> Date: Thu, 1 Jun 2023 14:13:06 +0200 Subject: [PATCH 52/87] Move to a relative path configuration (#48) * Move to a relative path configuration * Change github action from relative to absolute path * Remove ./ from path of micro-manager-config.json * Change wording in configuration documentation --------- Co-authored-by: Ishaan Desai --- .github/workflows/run-macro-micro-dummy.yml | 8 ++++---- docs/configuration.md | 4 ++-- docs/running.md | 2 +- examples/README.md | 6 ++++-- micro_manager/config.py | 2 +- 5 files changed, 12 insertions(+), 10 deletions(-) diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index 0560b4ce..211b847c 
100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -35,12 +35,12 @@ jobs: - name: Run python macro-micro dummy timeout-minutes: 3 working-directory: micro-manager/examples - run: python3 python-dummy/run_micro_manager.py --config ../micro-manager-config.json & python3 macro_dummy.py + run: python3 python-dummy/run_micro_manager.py --config micro-manager-config.json & python3 macro_dummy.py - name: Run adaptive python macro-micro dummy timeout-minutes: 3 working-directory: micro-manager/examples - run: python3 python-dummy/run_micro_manager.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py + run: python3 python-dummy/run_micro_manager.py --config micro-manager-adaptivity-config.json & python3 macro_dummy.py - name: Run c++ macro-micro dummy timeout-minutes: 3 @@ -50,9 +50,9 @@ jobs: pip install pybind11 c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) cd ../ - python3 cpp-dummy/run_micro_manager.py --config ../micro-manager-config.json & python3 macro_dummy.py + python3 cpp-dummy/run_micro_manager.py --config micro-manager-config.json & python3 macro_dummy.py - name: Run adaptive c++ macro-micro dummy timeout-minutes: 3 working-directory: micro-manager/examples - run: python3 cpp-dummy/run_micro_manager.py --config ../micro-manager-adaptivity-config.json & python3 macro_dummy.py + run: python3 cpp-dummy/run_micro_manager.py --config micro-manager-adaptivity-config.json & python3 macro_dummy.py diff --git a/docs/configuration.md b/docs/configuration.md index 0767c9e3..6fdc997d 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -27,13 +27,13 @@ The Micro Manager is configured at runtime using a JSON file. An example configu There are three main sections in the configuration file, the `coupling_params`, the `simulation_params` and the optional `diagnostics`. -The file containing the Python importable micro simulation class is specified in the `micro_file_name` parameter. +The path to the file containing the Python importable micro simulation class is specified in the `micro_file_name` parameter. If the file is not in the working directory, give the relative path. ## Coupling Parameters Parameter | Description --- | --- -`config_file_name` | Path to the preCICE XML configuration file. +`config_file_name` | Path to the preCICE XML configuration file from the current working directory. `macro_mesh_name` | Name of the macro mesh as stated in the preCICE configuration. `read_data_names` | A Python dictionary with the names of the data to be read from preCICE as keys and `"scalar"` or `"vector"` as values depending on the nature of the data. `write_data_names` | A Python dictionary with the names of the data to be written to preCICE as keys and `"scalar"` or `"vector"` as values depending on the nature of the data. diff --git a/docs/running.md b/docs/running.md index 157e6a0f..44c4dd72 100644 --- a/docs/running.md +++ b/docs/running.md @@ -5,7 +5,7 @@ keywords: tooling, macro-micro, two-scale summary: Run the Micro Manager from the terminal with a configuration file as input argument or from a Python script. 
--- -The Micro Manager is run directly from the terminal by providing the configuration file as an input argument in the following way +The Micro Manager is run directly from the terminal by providing the path to the configuration file as an input argument in the following way ```bash micro_manager micro-manager-config.json diff --git a/examples/README.md b/examples/README.md index 6375bb60..88744b50 100644 --- a/examples/README.md +++ b/examples/README.md @@ -10,6 +10,8 @@ python macro-dummy.py python python-dummy/run_micro_manager.py ``` +Note that running `micro_manager micro-manager-config.json` from the terminal will not work, as the path in the configuration file is relative to the current working directory. See [#36](https://github.com/precice/micro-manager/issues/36) for more information. + ## C++ The C++ solverdummies have to be compiled first using [`pybind11`](https://pybind11.readthedocs.io/en/stable/index.html). To do so, install `pybind11` using `pip`: @@ -25,7 +27,7 @@ c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_c
Explanation -The command above compiles the C++ solverdummy and creates a shared library that can be imported from python using `pybind11`. +The command above compiles the C++ solverdummy and creates a shared library that can be imported from python using `pybind11`. - The `$(python3 -m pybind11 --includes)` part is necessary to include the correct header files for `pybind11`. - The `$(python3-config --extension-suffix)` part is necessary to create the correct file extension for the shared library. For more information, see the [pybind11 documentation](https://pybind11.readthedocs.io/en/stable/compiling.html#building-manually). - If you have multiple versions of Python installed, you might have to replace `python3-config` with `python3.8-config` or similar. @@ -39,7 +41,7 @@ python macro_dummy.py python cpp-dummy/run_micro_manager.py ``` -When changing the C++ solverdummy to your own solver, make sure to change the `PYBIND11_MODULE` in `micro_cpp_dummy.cpp` to the name that you want to compile to. +When changing the C++ solverdummy to your own solver, make sure to change the `PYBIND11_MODULE` in `micro_cpp_dummy.cpp` to the name that you want to compile to. For example, if you want to import the module as `my_solver`, change the line to `PYBIND11_MODULE(my_solver, m) {`. Then, change the `micro_file_name` in `micro-manager-config.json` to `my_solver`. ### Adaptivity diff --git a/micro_manager/config.py b/micro_manager/config.py index 7d292326..a7e447aa 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -56,7 +56,7 @@ def read_json(self, config_filename): config_filename : string Name of the JSON configuration file """ - folder = os.path.dirname(os.path.join(os.getcwd(), os.path.dirname(sys.argv[0]), config_filename)) + folder = os.path.dirname(os.path.join(os.getcwd(), config_filename)) path = os.path.join(folder, os.path.basename(config_filename)) with open(path, "r") as read_file: data = json.load(read_file) From d5e8a1e52eeaf884d9301fac2d77d26ae24311dc Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Sat, 3 Jun 2023 20:11:42 +0200 Subject: [PATCH 53/87] Reworking documentation (#50) * Incorporate first feedback * Add Manager Solution image to overview page * Using PNG image and not a PDF image * Further incorporation of feedback * Reworking converting micro simulation part * Incorporate feedback for Configuration section of documentation * Add workflows to check markdown and links, and incorporate further feedback --- .github/workflows/check-links.yml | 14 ++++ .github/workflows/check-markdown.yml | 13 ++++ .markdown-link-check-config.json | 3 + .markdownlint.json | 5 ++ docs/README.md | 20 +++--- docs/configuration.md | 74 ++++++++++++++------ docs/images/ManagerSolution.png | Bin 0 -> 103959 bytes docs/installation.md | 38 ++++++---- docs/micro-simulation-convert-to-library.md | 43 +++++------- docs/running.md | 8 ++- examples/README.md | 6 +- 11 files changed, 153 insertions(+), 71 deletions(-) create mode 100644 .github/workflows/check-links.yml create mode 100644 .github/workflows/check-markdown.yml create mode 100644 .markdown-link-check-config.json create mode 100644 .markdownlint.json create mode 100644 docs/images/ManagerSolution.png diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml new file mode 100644 index 00000000..f7e0ce61 --- /dev/null +++ b/.github/workflows/check-links.yml @@ -0,0 +1,14 @@ +name: Check links (manual) +on: workflow_dispatch +jobs: + check_links: + runs-on: ubuntu-latest + steps: + - name: Check out 
repository + uses: actions/checkout@v2 + - name: Check links in markdown files (markdown-link-check) + uses: gaurav-nelson/github-action-markdown-link-check@v1 + with: + use-quiet-mode: 'yes' + use-verbose-mode: 'no' + config-file: '.markdown-link-check-config.json' diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml new file mode 100644 index 00000000..39819fb4 --- /dev/null +++ b/.github/workflows/check-markdown.yml @@ -0,0 +1,13 @@ +name: Lint docs +on: [push, pull_request] +jobs: + check_md: + runs-on: ubuntu-latest + steps: + - name: Check out repository + uses: actions/checkout@v2 + - name: Lint markdown files (markdownlint) + uses: articulate/actions-markdownlint@v1 + with: + config: .markdownlint.json + files: '.' diff --git a/.markdown-link-check-config.json b/.markdown-link-check-config.json new file mode 100644 index 00000000..3fff32c2 --- /dev/null +++ b/.markdown-link-check-config.json @@ -0,0 +1,3 @@ +{ + "aliveStatusCodes": [429, 200] +} \ No newline at end of file diff --git a/.markdownlint.json b/.markdownlint.json new file mode 100644 index 00000000..979cb285 --- /dev/null +++ b/.markdownlint.json @@ -0,0 +1,5 @@ +{ + "MD013": false, + "MD033": false, + "MD034": false +} diff --git a/docs/README.md b/docs/README.md index 5c1fda8f..849d2f79 100644 --- a/docs/README.md +++ b/docs/README.md @@ -2,28 +2,28 @@ title: The Micro Manager permalink: tooling-micro-manager-overview.html keywords: tooling, macro-micro, two-scale -summary: The Micro Manager is a tool for solving two-scale (macro-micro) coupled problems using the coupling library preCICE. +summary: A tool to manage many micro simulations and couple them to a macro simulation via preCICE. --- ## What is this? -The Micro Manager is a tool for solving coupled problems where the coupling is across scales (for example macro-micro). It is developed as a library extension to the coupling library [preCICE](https://www.precice.org/). +The Micro Manager manages many simulations on a micro scale and couples them to one simulation on a macro scale. For the coupling itself, it heavily relies on the coupling library [preCICE](https://precice.org/index.html). + +![Micro Manager strategy schematic](images/ManagerSolution.png) ## What can it do? -The Micro Manager couples many micro simulations with one macro simulation. This includes +The Micro Manager couples many micro simulations with one macro simulation. This includes ... -- transferring scalar and vector data to and from a large number of micro simulations. -- running micro simulations in parallel using MPI. -- adaptively activating and deactivating micro simulations based on whether their similar exist. +- ... transferring scalar and vector data to and from a large number of micro simulations. +- ... running micro simulations in parallel using MPI. +- ... adaptively activating and deactivating micro simulations based on a similarity calculation. ## Documentation -The Micro Manager creates instances of several micro simulations and couples them to one macro simulation, using preCICE. - -An existing micro simulation code needs to be converted into a library with a specific class name which has functions with specific names. For a macro-micro coupled problem, the macro simulation code is coupled to preCICE directly. The section [couple your code](couple-your-code-overview.html) of the preCICE documentation gives more details on coupling existing codes. 
To setup a macro-micro coupled simulation using the Micro Manager, follow the steps +To use the Micro Manager for a macro-micro coupling, your micro simulation code needs to be in a library format with a specific class name and functions with specific names. For a macro-micro coupled problem, the macro simulation code is coupled to preCICE directly. The section [couple your code](couple-your-code-overview.html) of the preCICE documentation gives more details on coupling existing codes. To set up a macro-micro coupled simulation using the Micro Manager, follow these steps: - [Installation](tooling-micro-manager-installation.html) -- [Micro simulation as callable library](tooling-micro-manager-micro-simulation-callable-library.html) +- [Preparing micro simulation](tooling-micro-manager-micro-simulation-callable-library.html) - [Configuration](tooling-micro-manager-configuration.html) - [Running](tooling-micro-manager-running.html) diff --git a/docs/configuration.md b/docs/configuration.md index 6fdc997d..01c46f82 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,11 +1,13 @@ --- -title: Configuration of the Micro Manager +title: Configure the Micro Manager permalink: tooling-micro-manager-configuration.html keywords: tooling, macro-micro, two-scale -summary: The Micro Manager uses a JSON file to configure the coupling. The coupled data has to be specified in the preCICE configuration file. +summary: Provide a JSON file to configure the Micro Manager. --- -The Micro Manager is configured at runtime using a JSON file. An example configuration file is +{% note %} In the preCICE XML configuration the Micro Manager is a participant with the name `Micro-Manager`. {% endnote %} + +The Micro Manager is configured with a JSON file. An example configuration file is ```json { @@ -25,10 +27,12 @@ The Micro Manager is configured at runtime using a JSON file. An example configu } ``` -There are three main sections in the configuration file, the `coupling_params`, the `simulation_params` and the optional `diagnostics`. +This example configuration file is in [`examples/micro-manager-config.json`](https://github.com/precice/micro-manager/tree/main/examples/micro-manager-config.json). The path to the file containing the Python importable micro simulation class is specified in the `micro_file_name` parameter. If the file is not in the working directory, give the relative path. +There are three main sections in the configuration file, the `coupling_params`, the `simulation_params` and the optional `diagnostics`. + ## Coupling Parameters Parameter | Description --- | --- @@ -43,43 +47,66 @@ Parameter | Description Parameter | Description --- | --- `macro_domain_bounds`| Minimum and maximum bounds of the macro-domain, having the format `[xmin, xmax, ymin, ymax, zmin, zmax]` in 3D and `[xmin, xmax, ymin, ymax]` in 2D. -*optional:* `adaptivity_similarity_measure`| Similarity measure to be used for adaptivity. Can be either `L1`, `L2`, `L1rel` or `L2rel`. By default, `L1` is used. The `rel` variants calculate the respective relative norms. -*optional:* Domain decomposition parameters | See section on [Domain decomposition](#domain-decomposition). But default, the Micro Manager assumes that it will be run in serial. -*optional:* Adaptivity parameters | See section on [Adaptivity](#adaptivity). By default, adaptivity is disabled. +Domain decomposition parameters | See section on [domain decomposition](#domain-decomposition). By default, the Micro Manager assumes that it will be run in serial.
+Adaptivity parameters | See section on [adaptivity](#adaptivity). By default, adaptivity is disabled. -## *Optional*: Diagnostics +## Diagnostics Parameter | Description --- | --- -`data_from_micro_sims` | A Python dictionary with the names of the data from the micro simulation to be written to VTK files as keys and `"scalar"` or `"vector"` as values. This relies on the [export functionality](configuration-export.html#enabling-exporters) of preCICE and requires the corresponding export tag to be set in the preCICE XML configuration script. -`output_micro_sim_solve_time` | If `True`, the Manager writes the wall clock time of the `solve()` function of each micro simulation to the VTK output. +`data_from_micro_sims` | A Python dictionary with the names of the data from the micro simulation to be written to VTK files as keys and `"scalar"` or `"vector"` as values. +`output_micro_sim_solve_time` | If `True`, the Micro Manager writes the wall clock time of the `solve()` function of each micro simulation. `micro_output_n`| Frequency of calling the optional output functionality of the micro simulation in terms of number of time steps. If not given, `micro_sim.output()` is called every time step. -An example configuration file can be found in [`examples/micro-manager-config.json`](https://github.com/precice/micro-manager/tree/main/examples/micro-manager-config.json). +### Adding diagnostics in the preCICE XML configuration + +If the parameter `data_from_micro_sims` is set, the data to be output needs to be written to preCICE, and an export tag needs to be added for the participant `Micro-Manager`. For example, let us consider the case that the data `porosity`, which is a scalar, needs to be exported. Unless already defined, define the data, and then write it to preCICE. Also, add an export tag. The resulting entries in the XML configuration file look like: + +```xml + + + + ... + + + +``` + +If `output_micro_sim_solve_time` is set, add similar entries for the data `micro_sim_time` in the following way: + +```xml + + + + ... + + + +``` ## Domain decomposition -The Micro Manager can be run in parallel. For a parallel run, set the desired number of paritions in each axis by setting the `axiswise_ranks` variable. For example, if the domain is 3D and the decomposition needs to be two paritions in x, one partition in y, and sixteen partitions in z, the setting is +The Micro Manager can be run in parallel. For a parallel run, set the desired number of partitions in each axis by setting the `decomposition` parameter. For example, if the domain is 3D and the decomposition needs to be two partitions in x, one partition in y, and sixteen partitions in z, the setting is ```json "simulation_params": { "macro_domain_bounds": [0, 1, 0, 1, 0, 1], - "axiswise_ranks": [2, 1, 16] + "decomposition": [2, 1, 16] } ``` -For a 2D domain, only two values need to be set `axiswise_ranks`. +For a 2D domain, only two values need to be set for `decomposition`. The total number of partitions provided in the `decomposition` should be the same as the number of processors provided in the `mpirun`/`mpiexec` command. ## Adaptivity -{% note %} This feature is optional. {% endnote %} - The Micro Manager can adaptively control micro simulations. The adaptivity strategy is taken from 1. Redeker, Magnus & Eck, Christof. (2013). A fast and accurate adaptive solution strategy for two-scale models with continuous inter-scale dependencies. Journal of Computational Physics. 240. 268-283.
[10.1016/j.jcp.2012.12.025](https://doi.org/10.1016/j.jcp.2012.12.025). 2. Bastidas, Manuela & Bringedal, Carina & Pop, Iuliu. (2021). A two-scale iterative scheme for a phase-field model for precipitation and dissolution in porous media. Applied Mathematics and Computation. 396. 125933. [10.1016/j.amc.2020.125933](https://doi.org/10.1016/j.amc.2020.125933). +All the adaptivity parameters are chosen from the second publication. + To turn on adaptivity, the following options need to be set in `simulation_params` under the sub-heading `adaptivity`: Parameter | Description @@ -89,9 +116,8 @@ Parameter | Description `history_param` | History parameter $$ \Lambda $$, set as $$ \Lambda >= 0 $$. `coarsening_constant` | Coarsening constant $$ C_c $$, set as $$ C_c < 1 $$. `refining_constant` | Refining constant $$ C_r $$, set as $$ C_r >= 0 $$. -` If False, adaptivity is calculated once at the start of the time window and then reused in every implicit time iteration. - -All variables are chosen from the [second publication](https://doi.org/10.1016/j.amc.2020.125933) mentioned above. +`every_implicit_iteration` | If True, adaptivity is calculated in every implicit iteration.
If False, adaptivity is calculated once at the start of the time window and then reused in every implicit time iteration. +`adaptivity_similarity_measure`| Similarity measure to be used for adaptivity. Can be either `L1`, `L2`, `L1rel` or `L2rel`. By default, `L1` is used. The `rel` variants calculate the respective relative norms. This parameter is *optional*. Example of adaptivity configuration @@ -109,14 +135,16 @@ Example of adaptivity configuration } ``` -If adaptivity is turned on, the Micro Manager will attempt to write two scalar data per micro simulation to preCICE, called `active_state` and `active_steps`. +### Adding adaptivity in the preCICE XML configuration + +If adaptivity is used, the Micro Manager will attempt to write two scalar data per micro simulation to preCICE, called `active_state` and `active_steps`. Parameter | Description --- | --- `active_state` | `1` if the micro simulation is active in the time window, and `0` if inactive. `active_steps` | Summation of `active_state` up to the current time window. -The Micro Manager uses the output functionality of preCICE, hence these data sets to the preCICE configuration file. In the mesh and the micro participant add the following lines: +The Micro Manager uses the output functionality of preCICE, hence these data sets need to be manually added to the preCICE configuration file. In the mesh and the participant Micro-Manager add the following lines: ```xml @@ -127,7 +155,7 @@ The Micro Manager uses the output functionality of preCICE, hence these data set - + @@ -135,4 +163,4 @@ The Micro Manager uses the output functionality of preCICE, hence these data set ## Next Steps -After creating a configuration file you are ready to [run the Micro Manager](tooling-micro-manager-usage-running.html). +After creating a configuration file you are ready to [run the Micro Manager](tooling-micro-manager-running.html). diff --git a/docs/images/ManagerSolution.png b/docs/images/ManagerSolution.png new file mode 100644 index 0000000000000000000000000000000000000000..7ecc25e3c65030b45af94f006389fafae26670bb GIT binary patch literal 103959 zcmbTdbyQSs^fpX)OGryecjq7>h;)O5bcoU&LnEDnG}6)`4HDAbAvJWv00ILH!#l?F zd*5$;@yEB$x}C)l4twu=?`vPzz7LV=stUMR6j%rd2)M5lm4;dc~Q zFAL!xm`;iYt_TP;J--*?he>xS_{|e_Wo`M#$43tjkH*GE85xOC@5oNV?TfXJUBRjLhsqx**7*ey1Tn?@9s`cP9Zlp z`uh4)Q&R&210O$rl+qoX4~Ki|yE3!c@h=PJ5IXT(T&@dn%U~yr= z#@6=k5hf)i<>ux_>fM%|oqg=V@ujo^lx%+3`DJ^wHL`tavodmc^JxF(;cTk?>(pM! 
zz;bPU9W(5sGy=jigjaGew7n3Iv!e|+TNnDpZS4|eLTw_)ZFB^R5^@38skELNjO@BEp^KJoLv?-9V(BqulF}m5d8j*$RapZooJbp$~ezTu(CO_i< zwP?5myjfv>+G+wW9(i;W+j*GFX4}_4;dtwv2Y5l?b(Wh5B3a+7YJf1T{;YH?3}W`U z*=w345&l?+rd*%cK7I;R!>zgdb?RH+;#)*{%HF-6_ePX(9F4vJIQhgVI-3Ma1Oa9i zYIU?9M|qWxhbk_>3~@1VC2F51tjjF;ac5Y|S}wx^9C5*Szqo8%Z$nm{Ol{5`sn2n; zI&)VaM8#Y8d^<<#MUDFiU85g|u*1s}Y-egHc&569?Y^utQa-1scg7JQnBP0*v16CG zw5rN^=W@C7D1wPSm1$zJehGU@Y@f(dECn;(?hXbL;x4} z7iC^wn}Bs9Ur&*D$7ll^1%JT&a3!SbM*_L;eYmd5*m@&IJUc7PzAV%LS$+NcF;+fJ zm^*eLOyLz_+uufb|E$}?6_mcZm3d4$pKt=>K7Pl7G6G5mT(4p**4U`;l`h~Gm<|S` zcIJz;wV2>aY=qyVasJA*wCHGsMQE1X9fQ{G%%5A3&0mSXI4$o|lX|F^lm7BqnTS-QkAR_!&B;p zSyIEdK%eA8*GwZ8z?OX;tDs8o7bMu7TE5BCpXWLG4c=ViUMFRWNBD6aBKRe4#+kGZ z@1=%5uFY6e+;J1Iem)08x}BFRzbSTrIc3SFwjoTQL=c~)T44+X12i$NoDMBej!YI~ zibMOVx$OmqTlOS-`GY!U^TWuqa-xnxgwvUZ%W!p(zfyShs8~$Pd_oKQ7PD+|XG}Sf zXeYGK=6}>3c^sbLLtX`ykZ-ddvq2807`s#lsnZ+MO}0sOG9+uu(hg_i!YgS1C{ROJ zRkh?ty!u6=Y({jKDVB})<}+-ZYGU#)wRCZHrW&jjdrrdovCd%pj~z;0v|yAZ;0;UM z@V+I=judnJZ003HJ8#Vsi2}+zl%8iZQz5yMGU~cOY&ur?F*Kmhloy(#yvM$$3Ij<# z!u1WH03LhJ=bYMh1g_r{m>;TQ-%ScoHnSC7ft0Of6+1_rP4Z9(PsK;q*;WxbU6ff~ zbVa!H11)|Nf#$~)ABmK<1z#UJ06JV zpBGQT6B|@n8s~@f}wkDov zbw{v#d}yE;oJLsXg8S4#7Q1ZGZ=5-pY$rs;TzQq3Seu$LO)$;vnx$gTw|2Wavau?< z>m~g4D`s20d%r4?rb)9-WC@VedOw+E8qjf%QmK=TFMXgmC5wG{<*^9@&_B>5MfbQH zw|0r2MZPYT=>Yi6HpngbpdS-|g-tY&K~Dr|(;p*_?HxnfnpefjOFCC^+xEnsuGJ$UbnWu;#VWFH-xsx&jkU>Z41M@ z#Pe-XNLeU{zLsrf!8hvTZ=8HRH{|$5DTL=kyz0C$M`&xRBIe3sd$>%BoF8pzph)c^+@tHI48$X7!5`%vMp$p!nbk7 zwaqHJG|rHQJC}^^yX?G3T11SdBn-Ni60cL?m0zHz59>^G8^>wexcul1~k+)R6k9ej=QFP4&*QH%ziAUGO%g2oJu=*9+$9 zQPMI2jIHZ^%hd)xbcY`Tl&ZQ%L$(+OX^F?S-&;ofdckt_VC>@-y@VdI1S#=Ph4`Ia zFe?g-qf4x^SP9`b>bs&oDy?ZKskGd=rYV#YOIhJ?PvCdqQ1!`9D63*Vd5VCK zi;^kh^^Of^1r?$3&H#I3zgTSvALxvu+UY$f4Uc3XvUsT;QPk?Gw2ORi8o!R+|$92so4zCrVxlcT90W_=~vJM2ZdIMn9&Q*ce7edy-UXB zi1Ns4&3ej)wzJP-mJ5=-7udYJitU6p^~}`XOS%f6?C@Qmjz%W&2yWGc>3yv=W!-zK z+5ArU1+8GxD(TfLl<%a6B0b{_9(-MD$iKQpI|HZ0FMfU029IcWa^f{}E0S6CE4{_H zuKS^(3Le`qw!GS!zBg^-Q*z7YXMLfTlb)19*|OY|^E7rcI5$#{Gr*g5T|GJj4R)N* zE2yYPtm!Y4Iy+DjD%bW-@R?wsZ&(nZI_xEG&o@c2CC|!H#ATkbmrypTWQD-s%dS$o z(pWryX@b%Zi2aE+&m)H^db1D(sWuudv!sS%9=7iDrMOG?aB7vRD0ch4_`YiPp_gpw z=zn_@TUD*&$7Q__rnY5!65+y3ovFkrfVRcWE?$p@M`$mQSIT3o|( zBFRoDrQG^WyoT)OmY6kUd-ek{LHZtk21NfdRyT=I-bSbKw!#2TFy-=WA8+raXsa) zNn#`Ebi67+Jrs{8J);x#3TK{yct&lQ00Ylb8u>!K=zQH3(4hd&iq4#nL_B zODGgCb?F>bGS|};n!Ie0&@z^kq{83~U@L!KgZK@_e{<({;U*_f(Bn<5XX7^Rb@Bld z8tl!uvHVDnyJwU|EsX)2pvDvd=~*lF9@Rn_OKRHqhh~4GZPa@{$1mzhNw_dkPS~&f z;^0Tta@)bYJ4JI&*wS!=!n8o3_(*BkOHhfm`7F(nz81Z;VTiyO8tIS{s{F;0#5{o} zx9ckb^rT6`S0GY=EYdYm&@Jmn>{|5q&MgnIXh*kMzSh3EFHE(HDM_GAjl|b?nR(6LH5*p9K_ zPm~O0aQ*N;H9#7gVkAFL!V}HgI-&M?+L}xOn%N4?(2niTXDam@mJK;@(03+}jN|QL z0@LzIY8XsZ+w|0f&)JO>_pfNznTjr!4@s<9y}dk~wj%-htRU_gNg^l)^|AU-)2RMW z3L>gk=jFv*IU~Vxa%DY>0xP3;i$(kGTm|75NSp+LmrguQb(^wljjk-Vf?4?Cq4A)*9~e~rhL z!`IK+m)Zlx`vtn{LuZG=V2`#mK;1v-{w@s3U=abh6>bS&%(aht5pW}sILcUf+W$H~)=FUCgHa`B<*>Q=|7uGa~exz@# z>6*+Qc3MPm4RqpL)O16Q#ij*}9skO=L1ZFgGZb{ktUCYbFoyO^sI{v=vQd9T<$Xf&(x8Ln0WRGU zU$8F@s7Zvt5W4L}!0ycSNrZuue8Y4N7`)lD95o+=H9R2ltW^&c+PPtgX$(>}_TBqP zW(cKS{Pi-5l)LrI1MXb8PCxwzcEap8t!||>8_Nx8MMs-X+TAE=itxGf3Z^75)rBC*#%{S2X6LyZE)DnQjT#qYs}=B41k~GN3hGCj z&eyLF&UHOGT_S*v%Ba2j8$0L+A^QZEA+%xcAYsn`^6MJW`2~}vc6`kE zJGtEyYugn{N96J$*0Q5lRZ?*yQh*BrYop5z=37!yKqej)SC|q7^-K%?&)`Fy=v;-E z_VaNBeDYdYC|7=$AI~0{AM@Rt{1jmA!=H@Ptfa@g=P~!9Hh$t^H;~3kmt%t0h~qDC>5_k2kLVyBZuISWA3G0lQ-Uv zsc}+uRRNju>Zh(sVF0umehP53f#h*&i&^bHzOQ9g`dqTXtp6}s?r{`KzOp4LXX1-@ z=OYv~lr3;AhHEn|Q{bD(?Qc6tlRerdl*2#cVA+`SD?GuQJf`ef0*4nedY^Vd${^y 
zy1ZU}8?n${0Kkgy`yUT3xo=@P`Qmnv&!E>Z7-U(OFZ7eJ)^#UpIO5mW=#x^pq;@|9U_w(Icgp~x%lSX zFEF_J!7W_phyOmRg8TqSK!pD)8G3Hq&l`#2_L5{ljqaou^pba0zH1F$DDWgonije^ zzeF7*e8_nPc$e7H_3&uP8j4Q~tbJ8FPkI%T-J47MC0IzVAK-;2D4(hEgEij}jNP}i z&heY?M^n7=!PcQb0g^|ommU`@JI43_=@NN^wixNLA2J2?*1p@q1W)SHTwggeSx0O5 zS}Dr#WX_hKf^d%yzj z)N`AjS%i{J3L9xoJL8JcY5VJNP2ZEh9HncowR<|*L3{@Z>K{|T?RJknh-q61iw*$G z&Z_uo;nm4rVMebz4@czZ1bd#Es8t3<*tAgb6Zy1i0otop_Apr~!0?8{sA5Nf5GG`B z_f!i@1-%Xej1V2-u>Umb9+*iJlcGFXWGDaHa`Fx$E4fUI{9DfQ+kJ`joq`*@)^Oeq zH~0y$0WMwrlxT;|t_)s4y#@dcwoks^;ZG*CdAk}pdJ`ky-Tz^<2=V6p`qt_4xm%t1 z=^AWKgv#0MK;U8Q$Joez5S`$8H0)&K(n`ncz2??N=o{`AEJ-JB_4#A2r-(~_-z!F5 z=Tj=uxYR`*#&J2`%PzIZlTFQ2$w!;o-c+FJRv>O&Hy^eMm0ww=@LuvQtY#hy`{Fg| z-bF<`ZN#2=w?dG~rREkZDqARNRT@Nc=pOcp+}b44`k4fSQa|f+hO_o0LimsitRhe| z-R9BVJIb|;3MmksTbJ_}qSQtHdmYVmF$Jx6)z^HdT1=@#tN3%xmjx}1{1A8NLcz5O z9qFsksZv?7@A`9rq6MU1+oLjTLWE+s%cz4SEQJ65Oa&T@lHw`iuc z!?v%@1mFI-^6Ufs#Md6Gr{ZQrAg^6TJIJoStu|8B{J9|2c6l!x7t1$(I^0bFRqz`n z{@V5!hc^5R^*K3Gi_&Ii0pc6YazywQ1XM*X5U*uXzOT+6x}DJJcxHntUAVp^*8WMNUsrm{g zhBe!N@X$!z^>04(z-*qLJ&*Au==bgA_NS_fGmT2)c-B{dq$$A$|1gbGK($0IsCIa} zAV*ji=;oh8pM*N9DOVS05S2&4D6fv2!6rk@qel2MBb-WiW|*3eB(cJ7mHG=?@N``_ ze82J(ao^hA1WGN7b}hef=vMGsxy|B}cEq0I9-}E)XRg2z>T;y)Y?Jt|VEVMkOAqeB z0}qY=a_r{&%`_=0`SX2?7UFna+B~ms;HPlQ<=`$P9ef$tB5Y<|d-F8j>l&Gyfk3O> z6Z&B@ZF800IBH>lyRQvv>%<~=lL>nf-wUzYNs=UHbBg=T77Z%gU_@$J`9Y*22ewlxLQKlQp2+!L7>r1O^l4occhSCVy@13U?lx}|l0x)$i zcH#rc!FE#47`Be0*XSU3>KMLw;X!wu3Q%dMYuO}83EHPgk|&zHk7}s0BbG~E1-@H74I{X z+I@cdOj5#gxM8E;e>jeA4t$S7uI$72W+QvjZjt+=7;jT}6O(IN;S~|c<{Se`%x%zB z)SasyD&R&3w=$bvd(BGf&KB#b0TkK>Dx)twdJf-iHqeB2Xz4*%cW&`VM#v>`ki_!u z`cf}N^ZBH-gFy5K!o(#GsSNKVxvShncre*$3R_2pUVA~6kna~O)gz?O8+*LO&Dt?J ze$mBgwL3pOfJowP37)?`H-Rr2`09t}gDg}P{Q8^=z8c{R4Soj~ zs@YBxFo&esZvQkM>*e`=lR_uUGuB`%+cFFT=~gbaKf5|joXGJumA*8v@FP3)ca|m) zrxsxpCs?Y;s$Rq;L_Lj&A`tWsd)WTp$@&{$KA0MStba-^X z%OI0NO~1IaU*;|ZzV~{Ba)OJnYHrrk%GR+S^#tEiu5}t-t{1Fa!|rLW-nyR0+jR-G z11owu4V*p0f6&Dq|BX~0+9CWX@E&!zKL0=5`v3ZbB~w6}0iD2GwbW74VS5neJJ7}c z{>Rj=Z{7AFc7ebl)b99d0tYKYTqvCT)%(D9em>sSE#IFV9d+%3XlH+0`nCvIKo7q~ zsS`dn49Br$Ff6;Zk*VPi_PwR3xrCg={zb{Z|AJ8|L?E)cRq$W&wvR%j&8-vU0m~&` z602HzD_DbuE0H=w(W@(M`rC%LR}^{Gj*D;ef{GnO=DG|85*$KY48ZYoO`e(ga|m`_ z<7D$Zk549aXot4u8oqgFmd*v9F)IfsbAe8htjQPQ55&V=B$H1Hjh8Z_6nC}2v=Xd& zjyoE>ZgG;-_4p*$*2o5;6BxtlkLhiZT_pVz^3mC;&4gTj-yXS4%*c_KO&bpz_H=D2 z&O?kj2v}FYrp_weuZk{nc*Kd`(r14}QmYkMb*&MzkrCZJoQMi@7gj$RlUjbBDo!uu z-f6_Pq}TMKe)X7TB!^Q*51D))oCKGPa?P&B42ijjt7Ok(C&7Io7-Enws;p&<) z`T{X;C{&scfPF@W0y_!gxAibXyfW){)FrvC<$zqe6ZhAEgmraz39%8zfLPG^P2o1~ z`qOfHj(U1a{pj^$lRSM==x!?;qqQ!wu?|{Pf|;>oe}ffgfQ8PE+&5BSTyXvjDKwGm zd!H_$I+sj1YUlaMFC9?YNHCdMEE(MoLumg^Rs&>Ar{UuB4E(%!*|JPtx&?!r6grfl zviisdeoZR9F_sWtpz}2`?Y`2(f^!uVNN=kGB+*rRheK2J?8*{$<)iw)@ z*!ynUgZzEYr!QUyb?TjRrtXM#ou!Uc_Jw|)I5g1jrZ_G9F@8cN}BUi09TGOg>{AAFidHsOCvrm;N z2Cmi*$AgCLv&rKQELXGI@ZnD&!`7~^pYhjzOU@8Gb8j=g9{za!Qu(!Q_m=!%O%=`M za4kr9VXn;L11$~Bc;1^C>1?hw@K=Rt^zV^#; zRA^o76oTF?3#gjRKY*r#Dd#K$}LFs$?Ysl@>jkF?SR zH*n`L#bVG$%9f_C@W$~UZc%|QgnaM^I>KFWH6|g_T9EEe|H2(Rgx$$+9L~>rG%@>Z zODm3a9a69(os9IqpLqarDXOXi7?;URcdcvqIXWG2>d|K0D{*0){7a8Gmfw%86e~WL z2yl{m-tphN08ZAMX`x}>AmNriQ(DP74oMvLqG6#Mgjogmk^ugq?2g^JZ*Kgv&7pz> zPH)cvx)0X+{XwSoe3QRkGQdi|V072ISO{3fqaVopA_b<#ti7`lI}tDt)gc#O7A~^e z)A3NMqtWcjEn?RR&u3@W3D?ytP0)K(+TQA2-zCxf5c)lRe*F9#ubNwIzExy5om=g& zZNu4_H(;jgQUAiT$JW>XF>x+p**5g%g@tCWle5UKScnBpQ$zh5IQ9f)xG0j2;4*2I zuW|e?EAL7JaORQy9B=I2c?f?;60m#c26T7AADxST+A5oizDu@vjeod^iqfd$M4+-! 
z1|X0OAMHd#!hvR85x-ww-xc2geNe`TbKh_|eP$A#KnyM!*{ANpgCF-XKGD)1bD)Lap_@=|O{hIKLfV%x&=u zz-F;e%tQiRb~%}{=rpteMVRQT(GgVUUTd0Za$=H_&FA;a_JOm) zfIC|*AAtgc0CE%A$e~)k?bz2v`y{x^rtQ%O5y1*`Dg;b)#Hm<@G>Qf;^LM1Y@%RxLK=wR$;fw$?V^}UJyCT}Z^RSmCI4nb+Vpzn74bL%L)H;lyvjHCw>jQ&JXbb@ zGY-9Lh;Ikn_?|+-JfU4cckRjEu;be=KyfWa3W%2FGfq6nJPgy8(MsCQb|3#CX_jbR z5gC|KMZ_0cmBm4BXsi*|buymC1)(k_*>saR$rDEmqYGW&<@4>XA8Xq$<$ybrtQa9L z-tEWUAm!1ky!2k;JpsfUh&ClGVc;f(Iq zBHtb-iks|K=u8`=T zyl`us(N0O5aR%>cwnlYqiNn)Z@X66K)s^>J&hd$uCWqdF(2J`GnqF;T zmW7d%A$(L5?B{yw45zIW!%o?kL7POh8^k1N-| zxXvxJ`7Q?o-T=aK=LFUJ$#GWRi;wqKKYJZjUP2*8+7okHe~BlW(Fe|Pl9ZF58Ulf6 zdG3FO_s8kL(*Z%CjiXkX?ki|vM14B{TTzUn08h0C?#C1?7_l*vC5Ex60pGV)a}p$R zs`jvZ>aB96uxDB_m)ZnNZE|x7gD*)ToFAg?R49m>f_qA|%QJwS3~>(YNmrfTEQt$>k$W{nj<)}- z%lZl=mQg-!#kD13+gzQ_q>}eyATWWIkp${Pu4I_NP`Q;<1!>xciv@4$lx zn-ps8W2nIK{>`MoWCDK;k7xRgWPf_dWG_BH%UiVij5j;mTlf*)R^)Dc&3*CC6=`f2U{V;LuCdM475WUu*Nz!!JvDgT(G_a1-qkF}||PyD=TC&!ot%0bN2g-)OP? zX+1mtT2I6K7c; zbgRnX=Uj5Z&$SfAAPszmd_Ts-j?GwQqtK9l5UQAPK~>N@P4qCyW%N{2;RcF8zSJdi zN`2M(O5le6<@-iqBpb>Sb2V<5BGtMY?Mh#$%45BIU%2+{U4mULaZ#1}+{)5fo;8j4 z=4N#<)deh>lKz4s;16FkasJI&4c(Q0$5b5=7H(SR%pBcAu3y_;Tc7Sv1g$YIaqz+C z=qn^F_dO=7qJbnw_z`QOsu+DlQ7MLVmm{l>ILKKn2`O~)7W(41+ewH4l~tAjEjXAk z4)WGJ0usLS@_4`N7)`oy0k`V5BBI?zMY^_WEqA5}?_26mr~$JiCgb^4#+CT#x|dHH zINW!%Lv3gqALXGQb14=!B2;|2+1D>)Ouzp!`9q4zhl1CQ^vdH5I@{ZE#x7XwRFFxs zx{s5cdK!n*DS(++Fyy*(Mw_=OaYHn{_xanQaj4lblbg()l1=(LlORT4t7z;<>0~BG zWT+weTau2j*|v*&NmM9-alR-IC&w9SCU%GgIcVXC?Z0;Mr7!-w?Xa&-gUs7WeljVq zxNcikl*C)gwm~`Dw1`hhn6T$^HF#>3_=SBYGw*Z;v5Du<#k_@ZHf8MEm(#AQI2F7a zF<9UPKiE{)^E4b4SQQl&oA)k{&c>GJ*%^!WcnZ6Iu~q2EVp~=czvY2m|Gi-Q9A;d9 zh-x@OXY}?WDw}QA=L8G^0~x-q>a-x7G(3BMDHp1t!2OAkfs?PFWy^?}u%YlS5Wqm5 zSjA^)>(0bxnOmLkP^80X|D>x8h;yyF6^TN7VqGF z8)^0Sb0nE_zc&J@6O_^H?u&#NIT92sO$5^xsM_%s$j$4P}$n0Rkb72Z|5>0C{#Ec=?0*tR(l5|E)KAXy5 z3v0GV9>aF2jD389yX{F0cao^nvwq?*>i42OivKAjL$&oLklMmnTGo(#o*lWpU}Fu> znZv$&6OpU7SC+h`$R*ZP5Kt)koMGpKFrvK2+It6FjQpWIWFX)8C@c$Z1Y76#i_>p3 zuQ8+@pHK*_0pSTkw~aaPYoKPGlbF;#OrcC=q5CqCQfQ)NPic0|KBd zOf$0rla?t(H_-_}L9>ud<=1;YLP23&fK27xL?pMOCJ4W);NvS5(JnIac;E*WL%6J~ z_=qfb#+vGrB-+VuA3abt?!K`i`iz8#(!--I!RQT{6pHZX_RILxLvfC_gsUp6>mZJ5YHIkbY5z7GE|ag$Gt2FFc$@I?@ymy_qyGw~WWmhV zM;#D)^oay)*6@}1RAar*w05$DiK0y|@g1P1dTyj$-D6~DNs!zH3s8+yqL<@OBB0Q} z0y35EZ?DRcaTDxJ;6~}p9?12ioXBbdrhk+jXz&?|s9SDcTS(Y<#YTs*$> zz%0D`+NEmouK;GNmHI2i8aZt#Y?VJWXFaVP?oX{w&*fjj_TV*pMTaddN(LpPZXA88 zYqb@%yqR-k_6pXiD49zBOwi zUHon3y_JFKq=s`?!bMLi?;Gitco3T?jgM8kWJ%btm(9D+wdc(66~}&#dQxf69W~Y; zw*P}Sy~{fbQm9Xd-_leCT9mTR1CemTn`e7Y-A22E`mqTPU{eT8=&Yu7e|YyySAWR0 z>#;MxTmpQf6!2(SzVJ@j{=4ZM=MQ@)TFW`_ex=oitFvM0)gpfWR{2RqJ9I@(6sX=M4fxewVy1oqhBZ8!5Z|Hwn1(?fKm{!g@#T z^YqVG@d16o?E+lGH87t7LaA|0TEXm7Td|Iwh_q|fvZ7)AnDj%wY))B*LMb0Pu}jP$ zG@u}K1>4H8Crbx=a1ag7z4K~ajv5SG$qUw3L_tb3Un8kA-{`f=>=GuS!{UHUbqlRu zr;;9b(A;m2&)(j6MbjW%bHyQiQ^d@(C6t>UITc+K45$dd~Xr6&|?LkaV@HIELw%~d`S z9!{y_5jX9dmgy8fMxnj=IjGDMxJ z?~L>;I)yT%gud*DL;~!m$=H@k<}Gf?#Q%zTLT}v8zxIgyn^gQRk5V?W=Vhk;t5Kug zhyka}xo@j*V$)k^DiYVJ$j4&{5O=&oEkl6#>u%id%f};8E#Y4@K1~|(-z9!Qs;b!3 zW>7Hl&xV15*N=j`{e-P-I;C~Iqo(%ZH>kP~` zqN*Ur4oQX^qZ7EP%513y=Z?kLeGHn`*d~iJ9{0Gg6nRC5B;a=!n-1R=aY?bW`77iNC2q)eow& z_LDj0xi4Ui$R42mI^b9SKFxtgCzaHxnCvlPi?dNz?dbC^m_d7qvJDC9zsnc#m;qeQ zETEJ3rodn{e;$f4Im*C@4vm{xjTnlZq~-s`{Tne<^Wl+LA(OhkC5yk^2u15XU1`lQ zRpf$XbDc3g3^NEr!>atQx6vc3Iof198R{f2Gj-zVDQthEK0xc_D?0qGpR$Z<(Zo4v z`M8x2u5#f3LC^7Yf8ekx5_A-!SDHf@sgJ4F^a*(!n_QqZ4P6VUWK`{%m7Anz35n08 zJKl26T=DrO;zcbm1Vbjw!S#CLj{#B;Uhj9F%A)50gHA5DK5;JXRu*-iBAZVIf6)YZ z4O0;}ho51jbBpE7NzZXKcmxGB`=UVDj~Smjbql4U1DF{8_}q&EWh_99A3+QtOE5zU 
zAfk(+OXm<9pcJDlOL#vHcT&&}XvbYPHBjJ&~yB+HN@sL()2 z2tVtDD5RlUZU>nre%Z$gX)FH*Nh9M!huCF#)+AI3+LslZqdzb;TmD zi=|S%-bg|OWk>^Egdj)4MNWM9QOIDk@54xZeBITo{fC^ePZ&dpz*`w_?ss*z%wJoo zx8W@)-}l|lo4+L{^naEZebZ*zc9zszd05oI%t>r8WmV z)9R(0lA4SK1egIGJ38R)*$jp3XLqZPfQCzm~Kg zP~D1Js#S%A0gH8@pOhLcN4m8Je+EVwb$8t~Oer^xOrvv$uvba;pG$QHUCm}_JPD}~ius#kBs3cDX3 zQPo%cCM197$M%ZW-NEb1YGHlD_IW9h7cgQf>HXQhWpS^>ck8(!h^jd;@;vbk zsrRARlx}B~1dvufKO|Lc_7VBnvW5L=TWNY<-Hl&qf9XM|9x5(?j3#DXomw z%NTnE53f&u==gpSl#A}6Up-pkLi~3sa&%~?+7e$$JaFJCat_DSLPN%o;&mLacnG2n>JB6;H(SM5_E&JgbmuGZW zkw9oN31Fn1#S1w)MrOf|b_>T zTzc%)^f!2sA{)tY259+`W%Pfq@7IByQ@}!w)V7N^oUEdFRvaihX1O%-hN~aeo?tdX z7T6e29_h2UOW-3lf_7$l}yNn755Mg0%4Ks|nZ>n{FEU?!HvqTUnv{Ez(=My=T zWd4OF)_-gSZ6TmezHT-&4dJnNqN}>Nc-;gIss)ILd>Lixa%8i>1OrCDvfB`XDX|Q& zpwvI^CId~^sQ&F`osz3C2gFrJ+aZ*xt5cGBl(d#-W6MAs5^KeQ2wMwn0 zXI`3totJLwy@bElL@Oy4GNmYScNX(LKu{;Y`=iu*ZJ5(y-T4U8f`$vNY%5o=`Ih^N|U5 zK68*KB`z8E0xj*?;O)zfh^;$018^rVrl)T#JFqRtks^iC$|hdOiy-rK{`4HRQyqc- z8{`0~MB?k&uw@!q62fBz1~Xkj!;{qM_sZaNcT3DLM^W5iN_-Anhg9x3&!)vnhG9DM z(oT7S$=@o>v~uqF{kwCUqY`kvGq}JY-j6o-&99{ajU_#ZlwF$I%Y*3%Y(h5M^Lszn z2>+a~Iu`?G^Y~Co2f<>AU}ydL zI`)&wFcB)Z&j}%gRTR+7PR6ZX8-}OuRk~5E*3y{Kz-8JNi`4Oib2kzhB9K~?6A|Bv zUKv$qDB($S)yw08EA6-rsq#IS*qv{3)WvAfxhC=N9A5tzxX$5e7`4}{`8Ol+mj{M=c;io?Gxp_Er!};NM zlkCs2@@i1z9B}0O0oi{g>`5Bp3n;(Vc!8-*88106_{I;|Wa|I^1MrT?N7qj0VM3!G z^G@pnl}%v$%vmSi`Obx;4d3a#@RoDPQQ4uX2FfosEul0lW82E__xk3Nxd_&_=kX6e zJ%DZA!9Qkis^k=rpqPaGeCqij3ksHb2jBXec^d~*OlZ~7&n~izg-pveE?&pC{ra-@ z#9&WbnXt=TJl>S$2c+rusGyatuwHmvSjCCz=TaR7;NONaXofF zL6TA*Y{OH^6bn3MX3r^O-w$Fh8A%DpR$NeZJfxND!s*6;U$d7!9}AGD8Z&6&m)>R#4lE9nSJ0(3CE8C4-o6pUcZweshxbD#n4tzN#q4h69D6lnU?cR{TGb(E_R-!V|rS4FS?wvMi&YvG|+I$x#B)zXd>1G4)xxhv9sF$Rr>_Rxuh(Sn6UdJO#GC0;e^5Etape?zS1dbYl{yNJga0-T82==foHF ziZ2NS*oY+BOHsEd^s|ho5Lq#g%l%x12hl>kYgWj@#|?1smwAWud3y6^>FQTX5ve6P zJj~#C@*>JFQ#xo~NENfgdS&iPzF--cYtLw>%4)=1owE<~tU0{Vi~m~nEs(zh0-)js z(f~eF<#!C5?MlUnf3m|2iAMm$EKZh;d?F4Z4^&~!9Hk8XoK1+o9iNT}G*WEw4*V)x zB=wq(juDOG(1)H*FMu^=ijL(uM?jBIFal6H+-VQY;{5r10NtmR?(wzJOCspGd6z*w zzi8Eh<5S4H072!Phh8}5P|5dMVD2wbfqD9#%;7+WeuO;3szag!&kS(F3Nduh>O8w$ zyI}oC1wFZ4V#nkqyLvq?pu~LU;V(w)!qK*Q@SaE7SuwqW`|kV?7coQLL3`dckvmU~ z2ct(yGvC1TerIg}G zZ6%`q_b!0!m-4~6CuiNdbfFjC!G5pw)QsfXpAF_19yl9+$@J_)P&U%TCCZn|5A}Hq z_}6LeE3F;hfdBN2bWn9SMNzHJ=k7p*o~QA*5KM+Z1Z9^C7{ji!?E@~_v|}YYkW3BT z81!S~!{^Wh5nFQ^u>hUnr{DOxa}%aEsz}+MhBf1SIKsbm$ZL*K2A}0e`$>HFTHF{^ zbK~L#Tezs9=VTMPve8YIgNLj_oA5)6IE*EE*{bCRS$_MS4&tuYfIX)yLxA)>c^aif zx&va1d*u8(ql{m8H=lzFO!fEdmWUegwtaY{ zyI&T>sN{L2t})^jcvpzMt<97}ji}5C(PKc_8LNRj;Y*No7i|o!0-I2%$oUku4^Z3Y zQ?+X!R~wfrXoR0zFW*5_;O~*xIA)|f5}*BjFG-CQQq&MZs1`^Y5`Pc9u+FA=+yX@m zNunVyj`lj`M<;y33=lH=Xr9BPrzWF_Bv6*1ho?%%hz-RguQsRt_~MBp0V79%OKTc) zKrXIVzm;VWZ6MBlE7rjA(i0vuE3UBj%S1unF5?U`CMYJ5j2%j1ltrC0`*YkONt@Jzd3Yic5k1CCa znYcK)Q1T95pfS3%X%Qls(eWPL??=>it1T(;8KLE{K|cAFS!urP1JHy$ao^;t>jHLX z8;u4tn>l?>n8E(Bnp^lSv`Q7qE};#k*Rj2m!bYyqLD0g6&0)YNc$)nw;vGCI#y;7I z0FL0w1@MwJz0&bynzx*2PySRqTyU$aR{7NbLB$5^|3O6#@Q_Sj zo8|V6L8rjM%=Q!qnAvI;DkWKmpc4;yHEer_&cvHfo(xYQ3FaE|3abh_=McFK-mi%s zh9vVg;aXB*RTQwjDlf(^9RIZr6G&0ciTv(w#G;lz=FyG$IN}cMV84NJ$TjG%`aEFwDFg^}e6y z{U6U4KFA?^uf5m0*0s*-cV7FfcTE^MYa8?i)ARCYY+k3)W2?eRm3>Ccrx=FtTCbzD zm--Ykw*ydO%DQfjm@Dcj9CY%xiCk8X%bd-~S)P#TFz>vn+e>XMyc|)IfW%UgaN!~! 
zAK~}!K2@V0UxBwngVfo~oBopevKPwKN00CTluZtFQ$u)QCkT7RrD&j%>vv2#UGQ z{8h#-sKxO!#d1hU%ge1Cdfx?f-|wDc=-Q#_yI}1fHN^~5^X{CSm=063iP{|WginCo z#0Ye>dl$(|m607O^g{%=H*Yu?M^vIYqR-RbCcO(J5*)hw#IM2yW z5mj6$J;AlU!IZ{O;Xwb<++ZGFj_w<;z6i8*!B_z0Iy9|3z1T|NCFnt=>_KI#avTS&E@X_c~VR5Q5c^`K%$#;xh`Z zdi~zSWvN=UYL|0DPmgBHP0nxlDPUcS`o}gQVPGyRGYtw=+s4b2iL_<(>bGg5R{n60 z!Q;$s>u*vJ`cu8x-L`8M zcvtp|Z{NeevRih^%prgodM*U^rI(O27Ev;Aa(Nz=@DI#-s-j+veA{}4-w418Pf}_& zZT=$&TACishB0EE)&wrJU;ZC6hCk)6TA*JE0JHv+Y!Cs%|G+}KA%ta3iLw7*&Ei^4 zF;LS27J2VPr#sKz(zoWn(sy;l1W?gRVbChWON%y96m2M8tSII1h(LBaNfu_1Pz0o)fLxWp&pjVqZ2|&`Ewc_-OV?XAxTcyaQYilJn)qQ`E?@ zedRJ=VXbc*(V`qqtpHT%2f=Ma7dMa13qSihH6KLh;I?NUgyL*G4y@HrfL%=gI_Bh; zF-pLbMq1=~sS*!0^qS|ZjEfP2H&-6MU2w-`Kb(5gCE}t;fen4S0OsuuwUnwFjLVya zLkTg&lAoaX7_h)L73`tWc8aKC_}jRnlzjIX`@y-ReqL20)`yeJ{;~SG?#3wleT%GH zDA>}9`I+Qv`CgfiJ0-H7&u&jo3*<*jT`$qsPm-wk1!?K}8Tt7&!17j!N6=!ovYV%W zkt}#_J6~ouRsJ@B(o$}sxF6Ryzd>=A_tP*_|IA5Kl&4q9l(JFM!cCiN;>D3U%{L1^ zaeGujm@OqU-v?qGIe+|7q6N|8yDKvYZ@Kjn-59x>Onr}t{_#Ho$sejtpTL(oY01S8 z|A{&tyArF_K)aeZq0(OeEq-F`&csj@Gzq&J5+ki9gTR)B`Z!74^-o9KnpOReX0%{+ z8T)tAf;?@(Rr!KJwN%VaT7~b@gQ+RgnFL*bghKHPzHQK(U;Mj+k4v`zVXDb6!H@l} z=o|p}idEKm1`o8ZEXf1_T)KZ6@5{e`8k%P%?hU(w5!3p6(l$!?Z^k$JAN6Ui4p5(f znyB?tBfVNkQWYa6My$1c&FtY~bAZXMfrH)+Cl}47qckn!<4EI)PyUCZsJq_@vl`kt&N$PI5uOQ4N>M;*uj~tShustkrpAhn}xTIA))ci~) z34>j3zR7V2hqlIgMb!n??fPB#5#MN~#s~;Ua$s8A)0OtPRf`83dm^^g2qChW^-&~G zfkN&UosSl_tOmfSt4pt6Z5Cb7zEv8zkH7e^CDsb^?0Kcu4u=IUhWdlcrtO4PnfJ=o zB`npuoynKzXWKVb2(iNI;g-Umu92c4C?c2u2`qrh>F$B>{CL|dwf?SdNz=qPw?^nZ z;g)W)hj7gx?PLr3t!9;4BXUTeRfA}x#abM24*+NQ^j~b)nk8WWSc9hNt&Uqx+XGptYPsEr(n`NBU*dt;97?#EuJA3v!ZmYQLSt4A25f(oyYpJjN z(FXhO{wZ_``qqErdB`a1PEDivk*spo$ik@cAKkOzs~|`2UsfBS&i~YsYKY}N6XRTv zYP~N0>w1GLOr&d=$$>AY>|LGV46Tf))IeDqcnXG|5SKBNkOzLzP7{l(^WoR0ff~EA zn2RF~ub_=g;w4b2m<_rDa`T13k3N#E#IT|`?%KKfP4CqDMfjtaIoli_z(Bu5N!}fs z6k#6^#c}&G&G_!^4!4-@=y0g@+;6CEFj?mM^u8y0yncTEdE$2J2q<~uKBu;_Nd6i) zB@mL1*g3k;Bw?u=yCJQh3Lv+rPp*SrBs~hwuk7oT#!PnnW1$u-mm5Ir=8 zo(pljFCCs-R{R)Yen8>m53x{aZx4=MdO51bT$)v}F6pO!(zm@N9t&ohcWRmMDE^)4 z^5Yb|XmZha@cAbjPD3PA9C~#azlAawkb&IkhAS+;iI0+H@8=N0-XKA=XRNJDzK0jo zQVT|BmE(n$ne1dhlD3liJ)X$Y+QmH@;_Jlp=WQn!xwFg<9 za-K|80Z2sY^i5Bo>}UbH+c}}e3`cTA7s6tES|i)d(ZHUaPhDn-t!)WjBx#~39cdmv zrn>!ODPSE6&MG5BK0Q-3raKgSiN%I3(VRyO+h>N|Cc@<)^-e4!v$5Cc{9B0Kb|UGGinIIPQQ3@*h7htR!d2}FQa8&wqmrB8GB4zT8czEol9oLeul0F z^213V$eY(m@|`}pC3*a6ym%qD5&@rCVQ3l3**d9UqM&(->gd50d+k=NgGjZGkzO-uF-@WHz=o^>p0FSni>wwx>m+ODnD~&|eMm)e=+W2`K&4S>YPEDk);!My z*WtL|Zb-29tUUot^EC{ShS zsO2~vWSzn;L-S@izz&_@L5Gp7P@v_|#T|Xm+sZ#%m&z(iiBO1qf)*^LD5x49nSdKQ zWo=zSamjc*js=&N2#v_wH{3TeWI>Pjhippg{2)9^MklUbkA+Zpq|@sbKSs$*ZrRfb z-`Odh(ljI6;tGWK)8^p=w9H77{CajR0Qq#2NibrnW@nkHBPBBRsr-4bqAT8RF?Y@1 z`n2bVp~O(_uC%AKSAK^^(B$n}ijd(SbR1ZH85_8PdVj*<(6$ea3c-ybJ;GY=-aLNx zd?nK94wQt_WC{E{G7roL_i>iF-UNXagn~Q*W#FdgJa;@oGd|9_9c&RNv308jYF41H_7k9yye*Wr00jPxtyje8xXb#g+|^a(z3+QhH;nyKLp~LT zS-!c&_AKw)6>L52L=Qs&!iIb-*s@luq5YA`NcK&epY7%cja5KCcxjs`6&e-&U6LhK zxI)~iD)Y^p0Q}{9TY_%8k^8Y0rH}I+Qqs&*GKJxZJ0C1aLY6ks2~pK*2XaqFConI4 zg)Zi){7ZjQI4$3D*; zyL8HqQLX;!vl!Z?cYsb$0Otw@gZkC)85shMazEW_>guB;!Y+h`ghEWj!fkmy=#O@Q z$mWZ9t!8yZ#}s-dlk53qNteN@edKX9CcCxK;A$D>*%gEQDQ zARb3-XKA)CNuWA(LI~bY%aK5Q-`7WvT>Z(^47<{z_X|m;xcI1(a2DP&|DKh#9Gyu+ z88W_p8!K1T1u{w@L_y03ek3Pf!6vz@pW2egy65;>$&JK8Z&dY5{b*0XCODqn<}>;o zg;0G6$~ZAm6+|N*So)SZwYwC_6U2EP4ZhSWWRgp<+Pm6jwodAS=2e0`Gsg#;DHfVN z>a1+#v9GBz@MosPd{c7t)n1A0&?MH8B!}$g5}+Hlv)e{msPy^9+Y-{1=X{CV%Wf2j z)l-zjR`bRMd2_Jpe}5pYCQ1wBIrfaubh~D10vhwQ6@(ry1srC}S0Et3fZ+8>~2 zTzweBguF{9K!_n++i1)cMGc<8+1*lTsnEqPJ}#+AS_^T#?y&D*V2cA>Pgnj;q&2ex 
zv`Uxx2e0Xlfv^2%Pk%{S`h9#2xuDlr>ov;P4p ztd&h4VscE3PF@*OV6zPdT`WeA+jcCz1e_dgE9*yz-0Fmo6qPfBzb7-1VhNv)P_WV6 z2#ry-^ag);G&SV$=_lQ;9x&awFI%cRzYw+wp2dD6cAE7|#OS~?M(~Y;nMSKDLIw%z zHGEYT!Mo%v=&TO62Pf~TsJ}dVBm|*kM~ansQDVxS8fN;(Y1`Ck=#KGhu8;Rt(Q^Pj z0}9uO<9wvP8tegg%`JolqHCC?WjrrTFh7s&!QQVsWxV$p(c7+mzpEM)FK@c{te4)Z zU1Xx=mkX2iC^VwqOy!Y3ZzLRc7!TQ#SKZr_FTCT#x3jPW zli3w{zPIGxjxWF4I`G`0c7(Fo#D|^~e7hLFD(Kr|&4?>?Et3Wp3bD+uGJZCo%L@95 zWm0fEm1e-Cg)7OeG4V4(w+#lT9H;g?xq>&Ft#-kBS3MX^U|igv>itz?K~=_Jf@79x z3IlT1`8WXzCG6;mo{^y(`e^m-DLX)aD+u+6WYJl#UmIN?>xicZB$B$hXZ_t$k5N7U zhE?_=FkOcNV*Q(8EQHy9?W5)(5fnKeuy65|C?;Xaj~xB;w;gSpObR17V;&P{UN22; zT`)C3*1mLD7`T#qg@v-K(&Xsd65v?Le3c;S%_2};Dd zgB)B$cN~7(VIPMmgkQZ`@Fr3)7DOYbhZ&gLdPn7Edp{OzZUR8iDQat}E&DgxMPfDO zv=r(A<&f~V8n!b0DaegeKZuO|W%!FWkyK)lybwmtjz)i*@!o}UMu2agV$9Vqn-;ZI zWITLGEd+U}h#txlP=XZtY%PGO<)LBE2pKC5_pB{qeWcjfz)Jy|fp(v0Kc&)zg5Ezt zAF?n={IdJ1NO}8-6ai}p3A>5|KqeFa>afS-X(c8-{igKKP3_%d6(LLO$T3M8mD0Z zfk}eqjPt0ThY8+y*pnX?m*4V3ZYkVX9|PV-LLWco93(m_qjN|~7UaI6p5xZPapmXU z0`ux8Qw+6&<~%YtG`nQthsw8+b6RrkLbE+E4M$y})o|X;jlXw4Hr&jk`c1BwCs)v6 zuTVgM0yFD=0|v1t8m_!6oCk!;0Z1!ZE->xZGqAcAi3)CY(?v` zN-yjzwEFsg5cnGM6a>={#U-jWo)uVE!C}6BdrA`1ef|78OL5@Lro&y=xN2tba+ZkGk-o!-DK_;WK2EN%;=8av0AYUsSqNs)Pdu-k!E zaWdAcq_h`nOSqpu@0$>^)Nn)QlwGP=oZ$2LoXFUofLAS#NPidyrcOj+kKFRu|I`M? z{`vFn0L8(#;{Gi+#Y$to{(b`B<+O;(iNFqfIjWM}70PuhJ?WvC&>evPej7$J+1ZBt zqIwM_{}tE;OoT*mg_pa4I>At*{GIXUz(Qa*8?7)w4VZ~j81AM!z9{WYeEzOTg^ddB zQO57n1hge_V>JzP4VO;o1n#6l#^d*kqyj>y#fNX*jIG9ZSp64-7Gt~hHrqi7;!V

_;E*J)QDffj5o3s|$u} zIjw!ZUUL@_&U;@CANlrLmFpk-@^MM(QmX2YcWBT423jq-)HYnoP5uUK7l?Qe1NxGL zo)OdYR4WOfEd@ZANo&aXzvEQEf^(|Qls>)i-wKEkzoh*^ahr&VptQSKT zU-HLbnsLLpyCf^w=I`A93qkIrO-;W4&xJ#!h0v*w(S-he=iKAHO&G93u_j=F;nHK) z$`Da~4d6Hez5ghmR}`LuPcg4QTw>i5Nnu?lzoZ4bB)vIcqn!#cr+H6+|4!`o3>I%h zfjKq-$2_n9PzuvjC^Y^-9bDQdg#@#|@()D(vnv6eI{En}zGL7^4Na^gsa>bGY%W!j zD(m#(>mQq(8x*QiRI-{Q8-%c@$&zKo_95{y?IPIzn8OAp?RHdK0Vu2Q$jENm#L?zbbQguQ)4I$5YFk~)Ym zZk%%!B6$S@_hhZx%XqERhc46Pv&%HuZni(7^89=1O1Lgzt@>Qo(NV9U6PWA;>HT0q3$u#84eQy< zE7@upJRkkU{JCM!%E@iX+mcIV?jNC9hko;HD5Q1V`m8VXUJ9=e;p`YXkTdC-2>Zcoh;Uya{^S_y) z%2cDMF%PqeVfqYwiL;}#rIoPPI)E(Vod`faxhs_KCjePwfB-jf5Z<&9tbr+H|&rhr{qY`Rx6zZh)q2(h&d1Nre>lm`>o^Q(7$JRKs$JqR~O zjcf!}5IRvn47%uEOf7_@udT{=jc-Q8+lDNiDBQQ*t%W0!o3f;jmoa;xYU?Y~vbA$x z?e1s2DME)gK4v`Ob2-mvT%L#V$$Ops?7if<7XT_<64Oj(F3riK=OV%KtV!vm+`^cr z|6TLh`!$c5t5jak{Mzg#j7cZQa=CCpRx$nkBw!9R-f2OAga1Nj5#JD#ND9E2M`K*- z!cjfG60P8C<3#OvP~$6 zr@%7TKsQ4ac;%T2)tll{`NlFsZA?CgXBAv~K5H)BTINDKb{ZO-|fE@y9s z>^tMyS-DLa0Eov_JED-wtSbdhwn@sN+42~kjleJO-3po2J_FCJ$7gC8BiC~7OC>G^ ztE_BCPUL>w+BQ6Ve6tO#DZ1YV7EqreWn~ln0okNqy!&`sOZb3DoU0ma|l_<$fwcR_IS})l+MWP(hcz@3R(|f>s=PF4q(Fls%lp6yj?!@?g_twdoTNd z=!LJ*kE`*&-KHo$$-;dyc9lvX;n=iMk@h9~1!fypCMu?HLYi2(uz9UVz_i^C&&6O_ z2|($8v;F||I=6M%^g8creDqaS z^`Jq{&d}_oD_S@w#Ao1e$tAgkXYaWTRd}Z>;yFl!c{sGlI<$}^NMT3~>AjwWp%-+; z|L`O%o4;Q4yC5gNHYM8#C!+lthPEnbHxnt8BX|{MAo{dP89c8@fbqE2L5V4POT5() zntiQ4bhH*OJ()-C`gt0gir73Ga8GZsp1r8I~S zVOeL_LVXk%xQ1D|TU`@sNoGQYUaf>Tvbs%DmHkOlEM5Vb%B3=nk%57s_iue5#J0?e zmsr!yN*G!CiWu7MrHXt|#rqs7Chm?ppTeoxuyEho(kzBhsCzU|-kti?Ea|7p zCVp_Ee*N70y07q|k9rA?ax2ggFL&Uj6XER|b5AiOPp$N?Z0_iY9}FRY4I|bBQ%UsG zFSgCaxpS?zZNje=LrMN!*M*7^dhxlF=in`6y&TIce4M37Jtw{(=BSyS9O7YX8wxWL z$PDj8*l$^VqGU{cQ+QZ)$Yqj*&wBz=N~-GIXSG*>Ca#we`!mihuq? zcsnX!2ms6s23dd@VIv6#7-yysDwdqgN$bxtFCl=e*LW%Gz3*HG;uhgg42RJ7HNSO` zXT`Ib{yXbr3Hrx_Dm$LG=B71fl?i0wnv`{83oPO01FH0(QLV*w zOFojT`_?b5euaQicc^L`sWgIyjz}WktnG;is3Q=mC)x9NdUx{lf!;s>V>tW2s!;hunjSwZ0tRyT&fU8uMxa<CB+neW%n9W9$ z6Xwe�VtW(90zIn&GPmHU8h54}7>-cpR@+Tvnf6Cx^VaJU;N>1WHeAPk}{jlyaHv zi&@t`cN$d)k$inq?mh)p(hyE?-xM{HD*>EKY%k*(zxZcj=x6_l_jf&?6*c2lohY!f zFGqmf-h>6E$De%VcKFN@dZ;EUjSky9V0%yWQ>?c45zG@t#{}}fqMsD zZB*|oG!QBQ-=9E$)~^}5!JWWq=cg5XsadP^cXrBBmu>I0fVse2V!vnJT)3fynO9bt z^X{_iUOywRp^!Sa$G-olkm%A~pg6OVKlkxNo%9?3loKd{6PE^>I|O3H^cq?9v&_bT zw+L7r1O_e-L3cZ~!}bvi3uTU{(B}Ldvi*{`72+K0H||@!nBeDm1eEyNzbHIs)}tm(61RkaGVeK2|OW&vvnV{FZpLQL*j& zCxSEOZz;11yV~`f4oob|djg*J&u+4_Q6AF0(^La0kQL=nG1c8xQS@|?00nkFMlANy ztHRTH`Q)Flph-^_zn*AjYxCXVhWK7Qf+NwdKa11>(ZVDjcTd*It4TJGSWq zxs_ZaTUMEZ2Q`j&mo5xoD9yU-xk`Ed=@an4{XM&pmd?JZiiB(3K+`6bs7p>+l- zuAvKGALZhMcf*1eqsOT#b~X1e&$m4vG}q!U=kk>QBjBB%Ha>_w&F3 z!ycG#;0ZQEAtkfVdVyKNDlAf?zG3?GXY(8A?%iIpc1vgYhy72j0>WPN?amjz0xTv@ zzu7>@(bBmzc*jN*QmUNVEll`VNI}TZe%IbE!1jhKFCz+1q=*`F^;w6AGc(on0?v9Vv=v{WB0s^fJwWpJ1Ll=SKU8 zUs)5~>67Af(?3P*v;Q&+p~3EEhCK(hdkyBBtv|7G%UUB%B%{#WLggP}r?pU` zcD!hX?5gqUlJ{g+8Gs_9%Q=_NpwexzU%gq{GZuW!-9cSOpHc1C0%L6xGErq{pU)bH z-^>_~&oBZ{C0D;+|6@d479?SwlvbdSz5#WQYLGD^Gqn~fIdfaR%9Q;MxFO>zxW(DXS zl7G8H#?1Mm8{*UiTTsAfxsG}^VZ`x~2c32UW=B@rthC}d?huFn;kVoSFwjycmA%f& z+$NrX{;yOHpG~zEp10Qsap;3*#lH+lf7svP;W~NmEo)`o`wYM7Jn&Uy--~37TiP}u zj>L4AOE@D1CG?CeFu2mL4O!bF-!pTi^I(idL}sUPNL&P!x%*3@{-gFWn&&#Q*#rAH ztT*!E9OnmCxq;$w_+zDh88;Nd&pur2Ml9wzr+&P!m_}~swyo?7SD(*pRGy{WThH|J zOn{VbO?8Fc?a@7LBNh}wc6}$My10pM6V0#o$K!7SM9}~4y5jI9{H_}rRcxqN0h%^4 z<2PWunEtZAr0Ye0Eqjp1oS{qsX;x40hn8$h)U(CKfS_8wQ9M925Iy*(xu8~uh(PT{~XjBwF`JJ$0_mD!ty zIg(yy^XwubyOwx98*N+*p}9+h9kaeM(a<<^Oo^uX%v5TifzxExOUgIe|5y5z<|q9) z`JT6D5pZxg*dxRJjkXkZ?+ItyPO53X;_Vbt15q{ewAm|jhj)4w;DbeOV7Hvr{oL=e 
z0!a#7x=iq&eR2=Jo7*!l6^Rd8%=G7Qn{vYvt)p2h-wYw3!DW%@7?<{|8yCAZ4Wwu( zlxh!^aE{cy%vd7W2*@1&O6TGnh;DFzdu5z4i^D}d~f zjsY4#f?4+Lb%G^e+8yo9iOkf`?=|-1?$%3}5T?6>z+uevt9lHyZv+tpx8fdR)tw*C zn#AJXG&%7vAJ8?bX0@$aR&I4zybDvW@UU0LCKc7;-CUphXHXEQuWW#M5S z+bC&3c<>sBtCS!qAvBLI0m}n?_U82!K>ilKWe6z#ZssN^@gP8Zjr$m1m%2Ls;C=`T z5Vb$?u7mxJl}&^==B1!7aGYdmduP*4q~B>-C{_8yHucv?!PI z@vOIWA=%3@_eLB#%QnxH-uz(HWTQU1_kvO)sy*lFiG%Jkm$jUEfNBDD`I=kMt+3VP zQqq0XRs61ei--~rb*qbDkG-GavKv03pete9wY|Pf~xz10}7e%$3)Bxoib2s1@+_rC(oKITUh2!TRx!enQ8@ zJJf)a4|((?P~@&I+UO6>eloNVon^#)esF8Fq=`^`Xuv0dC#Phs>X)CiBUr+&BljNN zUAH3{YVC9Lj5f9QXv743a|Tp}xN`)_FBi484mVka1lM^gbys6qHF8P!t@kGs3Aq*ofQQYQxbh zG0HA?%OlC}t`v3YSua06?0{!kh(JzuM@mOfxFeSlc}AvN?I0zY1v2*o-x9W9nN154 z2F#v&j)bamA2bul``!XA+>#iklS%$ilYe~PfPfp&zzQgY#d)e+8*R|viaN$!gBWqMIJcHXZhOck^poi?zJ#o;9`Bbx8lKa7u?bBlK6={($pkT5dwHhU@c_0a{X> z_xkClHf)7gu3EkZ{1R@=yB+lg=_xlhuH3g2rAbHvd|fXd=rO!iPvAC!ICQF!woDMl zc#DWwv)(xzK^nKc5LN`$T9i^G{CoKg`j+Pyvf;XUcTGG0SEe7)D@%-}&2 ze=g zQ0^pPxOiP3iNH|DqsQqks4{O=!PgO>=cOyF$~|9?6Wvff{UK@k*Drn3_t-F(BQQZ3 zu$SGacC(IPKe+N}{I<$p;c888R?iAgps_2bM z5s^(7y?|~7_Z@-{To0_snf+1LQK=q<18oWTYAj!_apHdv$lIp^z0vvko7@OM(8`{} zgZ;)dDNi#gV?l&?sH3)|0)pq-IV_FuQRDu>mGq3@toW=B)BuGu+q#@g0ma=Y;95o# zk$O>Gu4|GW)rlX!P(w0Oj=Ewv|x+D6xr=JXp~uOU5$vrKNObr%dVqo zFepLr^*Xeu?MAyFmGv9-HDm205qzxmNWgORGm=qRMNsbCFFQ2K^{QRqU{8WGeruMu zZy!arJhw0nw5lzex97${5Aq8z!D8>}N};PRN5nvXxpJ~uSy@3zRE?a*G_sqdtF&niKuG8(0lTY07)Kb2G&1eO$WfIBr+ibL3+K$`P z?!`2f6fU=#^0(Z!J1XByHZAj2i6)S{VwY@PX-(6_3{TzV8kA{PeN0JmU?LH=CaQ4i zhT_T<<>2>49&ADTPo)}}>LI)xmBW_)N-QzM$ONB+>g-VIeu0m}(MzVtP5tIG*YBjn zD9=Mox0br-<@iXaL}v@N?%dQb9KOAV;ECvUuMhHi661)nr??+%`FE&-#fO?cT>b9_ zX8$%g`*Sh`PH(FvSGb?!uWpQLpsrO=zxU?XMJ&$Uh?mwSL?5V}fUn6{qcb%{&}83@ z9?=j=Idz2~*A>J>BY=hVYTg$r2S)bx>k}Gi7`pD$Z8XPLQQT_bG@6sEYZ*IkG+!8{I^TR20$$7QL zhFpp7LUMZMnLb*S>(90-tY{s=NKaCwt;r$UPsrJ4?)1);TUml$L%X!T3Nmo2(LmVP zrTXQ$poPeUr$U8K_OyoDr??gyh^LnKLC)s&^b$1K3EZv28*RT}vzy60Phbh8iIk`g zPEpmTm)?`?5|e}TiG~{?nwZBI)WZ+bYR<3^6(VGOp=r-ufAoa+l2*3|bY5#Gt`zB_ zhAg!hOwT9+C8B93Oh_TAvsl>$R~e9_6bkGlHk{5MTrmMEmE!vwCE5|$Ju5MTt! zeoUXD7Hlch1HamMH_3uZQTcMlKTAtH;ygFa_G9ghnvwTK)BMb3-Yt5hUSr{|sn&qZ`q9M#Y1RagIeaqpgmg+quS&DGb0O~c+d1$lo3bEoQ&phel2V9W^* zvajY~;S4wVC17-pTW{Zp4qHrerRghqyJe^*+>EQ+V>0g{+zbY-Lzh|(Ha4T!WUFs? 
zP#cP?Hp4oD)>opE7(zg1FngX28$^%nu>WuSyCx>^Y(7$wZY@&D7a>&mk*lWTfa2k;=3WnxGk39HH*ml)`3L)qOy~Tyfz|2m1sx zkz~@aQfX|nD~+G8cJD@OEB$Sdg&OjH7_zCUqgel{gyUon523oNNO#R_+Bs{+V&4n& z)^1B{CBFIF!=vOC)MzoyD^dhG!j2Jb+PQyeZn#O=~FKF zL49xWKDp=x>r;*c{Qk!EcjCC?WBK!&_;tb$@|rrGES$i+S92m;fin5I~+p6$wzG-9SH&&|W?&YZZV7GZtj8VTQRSIqGHF z&xnbg9;r0i+DeV@YRkTPi-f0KmixnT`B%!{UEQRz=0p4j>@kTc=G8s{#z8DYoAX=9#GE5RV(B5g zZd}&hE5+lJQ+swkr{4$Vs$FXBE9aouQJ{R59t=&lV&qh-_fP4w->spsq`}TBu&DPE z!&F!r^eySNSN=jqvriE3mWQ#iL11@xjI zjGdQCMM-hbW!J&4uPFX`a@KqXj6y8e9{yjk5ojg^DDF_7j4qWoTlbB21hP7{6e=67nn2W`K?HGh1LxRF5|VplXa0CtKaxs1vGGH zXDNs4Nz^7ST$(pXG28W~?;Wzh-%er2rgTBE#L!Bs-`GaUXYJXlXPwUlb1m=5@DoV9 zN_De{D!oEi+2CzE)ZBJggXC4}T@%&_-XB*^4KM#7Ek_gRWc@@Wx`T#za@GMq!vq#p zhids1npCSR3Y{+d_UmJlD5|U}*YDs63I)Sl!{x`ij zppDLJasU=zB1{2kDe?rGxMzc1oFR^-$R$o2jC30kdR%wZXg;I=;n>^Q^3{f!S~pWL z?)dO9NoY5>iO%SA+nrnh&fLsctFbHe`0!6qY=b!iofh-cHZxokD2>boq7T1>1m24O z=6iys6#MBPAfRikqiel4F#dl@b%Out=52Z%4QpFTmHqb*|Ltq{zkv)QY#C~AfbE|N zf=k#h~pw%5xqPR!sAR%JV()+*<(^84?>hUG^2ibqr1w# zHeO^+06*4g7h!2^fPQz~GuhYYviX&PbubZTpWuC&JWZ2l-0 zo!gj$7=@2!1XyKmPSo50#ivqD{`-2osJzR zbsw+V_l@86e8-5Z zzZwMu?d@u&)KncNzPm*?`w?m6vR77pjS+M3Y1Q=juLAMBlkm?nthSW zUc0eAKEqSq=?brPPxn1Ns=kNF`DMtg4(L!9oUI;c-c{lwYw%~r!*z;~_Z)>R*)T#~ zBv$nBTc(FSXB~d{u?a|P7}&ktl`Q*Q@G;jW=WTBGu*m_a7$W{g9Hpg0f<23pzjuvJ zH|Vj?mh`xA*TPIIpN@Q4Wyf2wM}m+{N|X4==LL^v>^51Ff!jC6KobaR+nB|~DR*gP z(b*%|%E4qv7NEGw=KPMWs=9x07OluZX|b3+JO>7wWNgq7Lan}KIqJwh;CmxcK#c1- zy*Q~sw?Jz9^o{jF8js30fjulq}x#?}~PJwg$^spNMN zu!f)^^)VH>PSBQ@^)YEG8(wnyi`c-?8K<#o)xWV6o@yaN2uCJkD6+CS*04u?D@8+mj|fonD?CDY3a(&{c8MYz%anOO_*+G^f#&h{onGn3XN?rEa27}#;Ex~vGqfy z7JJNpSO9A>$i(u|z*23G_NYAaY*5~5DY*Hh{d6h8eo&^t+uCG$VB-wCSZj$8{fJFI z_!eEgog`#{|u) zkeu7K^S2G*d+h7&2fMqLn0E;6-4?0R7tfyTpwkpWzzv3aL(|DOcWzo2-1(724BhEA zu`7(T2=YlM+Ao_7;lvf4S^-c_A<$!KYO;)@9}hQGb<+853k=t23&doIr*06NO*;S< z6zxOlvLL8hgXi2LUsWRKnpBDt5H!G8xB5jiuvH=>b#usv`tk3an+kSS1dFcWb$$CO z5eo&02cSDACPw}Y;R^n^g-yo_#0fqXZP?B!A=P_JxUQPv!E0gmo28-871*Z}Xz z3e-a?R63q7))D8aCPE|C_2x1m@4m|jL~0Kl|Pm)7%7K*OXb z%UVvRK$}=SQW*r`^``UkA5%+jTcT1(iz~lQ=t7CIw0a+gt2-ISdwddVPXO+TH+c7| zxh_4Q^AQm0LqJF$u{+)u5G`Wx8rt=&_E@iRI4fZt|B8&luRbX&Bwo2SA0v#5gq|8> zqzUxTAZpWd<{**@R8@oCv-1%A6bdyYAqCSTF2=j8$0Y>-Hz4bx$cb3S3ookFVJ(S- zrJ!SltfK)~WNFoE1Q2LiP9oQ_lLrs=^+(UqC*u-#`Sl+F;}AmIQUf7dp&{>5DFDDD z+kfEE%(i#aePjejKM~|vRvfJ}NjcZ40)zzONwmz1pjM=7%lPHJYAFKub%pp^)OTs+ zq_(#$9STfDw_WT47K>@WJVE?mT(}b)B|7JtL2ph+;Q)_}jrw?&>K1gBGlw%2MmSKNDa-Jw6!@(KFUoD)EfPFiwiwm8_e`Y9+TF{1@6 zlv94mr^m)5fYvcz9`1@Dvye``%&$RNJ-?4G?IVQXW-`~6x95r^SoY`8s%v=K!vSoJ z<9^P8yQp~gcO;NI7gs7iNMz&~W8CPXuI*!EsBRimGFf>l_unJNl~7V+c|{*|Tu!F< z3E5}HA6|HEILU~{;ug+@JbK%3?cM?$P^%8zxh6v^89pV*7S!v5`^ul?0V_fk)C!^F zCIQ*3q$p|i+I94T0dmIcg9RQhKylZ57CktFp)vG5vR4$9;}HQI3bXpXcrfvs13@cB zw5H7S#2yU>+1v!r+|Mm#=gC{ZiAj%9G-(HGuV2SApadNBS5AoYoO%(gl z4(19A13!54;|_l$BWAD%iHbIKVo_;_3#Xs#(3J@BEWV>P#>gb(OsGDV+|mBfr8i&) zkG}Jj?=H7zLn5&IrGGZs13Ic1#{z}uqJ9Ffv;g!@WWox9^x-vNC4pCeD5pLYRMK?wl?8*H;|bG(r9$27`PT?5t{(C*VDP2^Z$y6A6?Ag-#>?ocVO2)l zn?FzT-QV|*&?Jx;d--djBjGa|ow-!OnJgsw+-7aeJq=c!#)1|`Vyl0hs%{1LDL}eb|Hw$>?f$&Lr%B`@IB)Nzx<;=c%DT-I zQQ&Ymv{05?aGqq*yS@){D3(^-<5q|j_JH-Y(Tu;JL+|*LYp<5GttA6+zhxw=efH3A z{kF1CckkcJU?&@2l0Ck7t)1xv(B{`^qXyvc1p55#xc*#H@X(f9hP~hfYHe(uW3mUz zZrZBy6f^hz&?mzK$uj^&^GvQ}^AVry_52-yZBr8K%TmK;^FO7>2mij)u{9gY(a(lS zQ%2|a^LX(qVac;$34@7M_%2r?;IM<;8GJF#mXHrVEtJ%d^-d7G$F_L|+BDmW-?pli z0e?I>a?EH{EtKU)I}G@0ihF{`YJ*ymY3}Slc;M9*5bxzq-8G)g82T?o_yaUS5%J{ zXS)auf;;LHFhRN$s}5b}sCPeXJtUPGe}x;^^8{j}ED^3?8rx-w0L12D2bst9K2V%0 zS3D=$rHeviIH2vzttVdrPXcT?_2vBg{9q8|Bi~0GLdX=?v;Gd(DV}e8ltu~Sokhyh 
zN$kWJF`?Tx32@5V*SexSWPBbo0{!LhGqYjDDx6@QpFP}Dlas#j0keC)N6)ef-IsYT zqVfv?vZ}qkor`@q-Na3Gu?;8J+RCi1Opu#-%D53IBoJZ%n-Nr79S~sNJjq*6W@HqK zRjh*>GXfoJWYCG%1CAV5&J$vaNx7)M0xts$XnU;2P)Fh4oYQj93agH$!Yd(>zeT8a zV^ZDqqkSd6+6-z{_%kZ!@=+D>mJez@}!5R)F# zC*%=k>;cUlqkvDTEb1hQT*XM-$4NIS1J0dQ13~)Qg;P3T*5LALH2dI+0xNZVcEuyS zI*ACt*N2Za-zzG)0uhVQBCSzkol0(X6Pvl{_k{tP4C3YuenN~hcOdco%k+W(>)wS^ z@SX2oQ4Xa3%1aCZ1kksD;UxMrlTJzPCw?ffI*4KAXwk74Sa@-xe}*}mVk5~7NNTc) z;@}T((0NTrLM^Ysr-yGSW5N0{w3v!V>hO_KAjfx~6RdLpA|)8G?K~+kM&x!OcVdw+ zekI6Y>!$#^ffmQnpM{5e^1I?wg!u@h+wiAeTDVI);mAHKAc2msnT$gNDl?2|+W0tmRe zit#-n!SoiKnh>`dFf+uK&HinJ{64vbI8@-T$4KSZS0jLcDL!P{$Is8p%PU(Q_E&)Z zq>8CI<|&4$e-j`+MrUXR{uKT*Gk^+(81_i8n^pPQWyc-O;J>||3m-`jMeKacTAW1( zsIo_AsbP_8_MuF}+pga=onUD>anksd^FJ06RDj6nH`=jub1(gEO`)jAG_X3olx=4+ z$8f}dnAH5|gK=m2ieR}$M>Dl2S`=7y2AOBF?W-pH+Bpk=)^4$^u)5_x-=J+^DV<61 zH)?f?{%u@ps9o{;QrzFebq3D`04;^JPk+tW4IKVtP5k!*z~IHia27B`24ZLij3~e= zOmK^uKc9yK1{nezuv?w05x#lr@qo=P(7rGP%vmqurw$-??2yWGe>>@C9PVvRb8`!8 zf+{w}%yM$a6ebiYq0D3ukSvd^wcR$FbGdx&YYsi^naH-;rRkgH?A4Pp`Y(m5e$-FO zJghVPYi&j2XVf8&&24Vp3ka8}LjUOL-rWs|u8(T7+h@B3A`DrAX{HX)oXs8$9p?Ds7zE~gHf z{*-mfw5k!t0_KuXr$fKE2gAo@lG|yv?}fD2f+6tZvkMFU@*lFMbxJaTk9JXYXk2rH zWHr(3sn21kTBdSg94iN;`Wld5*I;ED&llGXpm;!sMW%l^@bhomiK*XKCi=ZOdIG86 ziZiZXXBZIuB;=g^teI4F{b{!E4Xd?*52xI(ZZzQKuGm#-)xDv@CU4XTBxI!?4Fl%L zxXxohaNNAnzlQw$lxM#^!TASnG4v{s8JFH2~M`jB!b7kAxLFVddrdBax^e5=E0lE42MTP`^ ztZf{-VBRRITLq)w@xrcH8~GGccecEN`Smm8?Z&-&z-|4K9-;oINzo_73_tin&e-!p z@GYU_$dGhG2C1zN{kxK(m`+k}*B{?tk>XcaWf@=!l!Udq!%6Z?a&6E?ud`&o>^!9R z-ni*l<3uT8|F?`2W~+A#PBD;cO@3(Z8|Yr}%Ugj?OH02gjNXQc-T-$YY`nQ^A!dwT zwD%%(U}YAm-?{%~ncv?Rfn&?z`TSI>al4Ip2CQ_5c)44<$Kq4$F!J7s1EJNv32N)YR!T^7HVJ5l%M!% zK_6!|8!5k^MHFk7E%#gc8!G884cTD}g;#=X&jdEFTB)5t0j%lE+VSpXnNvk~g5hg~B|e=TZ$WlA;Z+Xab=?5f z#VBO^arZOLnzwH4NDqF3d#j0_JhUTE#Xp@5Ot7Bj&enEu0k^H6$rq2tLEhs#S1?+> zRN_l=uT`_`Y*qopv%QP1b8>X*E{P1}aRpzW1L_Brcl-xBPf^7i zU~Cs~7t2omyKQv?06A(X*{!=S!@TYqsL3~Pmlu5Tx$Gtn&}a8+E)AtF-ua7MR8DH3 z4icxUMw6*&ZwUs+1Da}Ul4k2H(_P2}tADg)-mQ6!`6%|tG}t4+W?j7eGff#s0qFS# z0|F`O%w+Rv_S_PZpNzQE1kfJiqEFBDxSc7_=X-I36d7+~T)aR6YwDsM{YQJtZ+-r` zmEDY7e#B)Y&4kkc$vS|6c3I@^cAM|7a4UM9b&Ay;N6}JqGqYXZY70Eu27Ujg=gW<1 zx9SBta6?Ic2YKI+H(Sl9Y_5|x%&{geYDY8>!GiXvlB@o5A0*LFsH1BxX)-@m65JbR6tRX3xy9*Da?&Nk-7MJFsLB}bC)#X{a+wwAz5UhpmzohZc zm((%)jr9Q!-aqcv_C%6=goDe<8JWfTnRp;5`L5ODL|Y#Rl&-)>(|nHSpc=&~I{4agGM zu&?ul6GPtha5sDiT|XL4A}=yGyUz=ds1>-%JJy%LD=(452V(r{K_HW#wnYfi)J%?m zkBDN|g$}=tQC4k6T5~}(+qx_Yl_BA{SHrR?(nkQ9HdYu9Bs(^8z5#4U)ig}p0I~Gb zU_fTxlX?rR)&~I?4RJC2 z2Z~KQXYStyn&?3+PZm!*DcSjALUFl&jVIdCZK&8I&!X&JBU zw#|-l3`kkVQPJ~a-p6*3m=8RcQ+fTiv?MLJw=__dLrcgh4c>bp%Ub8-%zf(F@}{lbxcN4<$SV=U?Q*mT+$whmETbFr>~(6eIWS;}3i&@a zyLI8LC~rbro$Fo;)0TUN7#-ylI;ZZT^r>eUDAwchz_-mwOx35fAJlPN zOoj|1n#Pd}bAy>!78T&`=2_%q)y%=xon12|r{c5S3> zYq*ya;nlvoT8mponcdo&h!n3((!!aYGu^0XGU$GSW3|J`+(^IS<6_1_Ga|cL1nEv- zeQDhCE7QHqYye!r_eaLIs;`>0TCefQ22Y&=jjgIEprAIK_L=i?nbAv^ZO=42GmkHx z)MQ6DHzFTF<{uR|)XH|5e^3j8gezjLhZb?=9TIWdN!pBY-Ve(Om^q-2yN+bs%RES6>tZca{TkLjN;LsDlz#@^g z#Z;K?)KarY&|1&7GBujb`}nm%Qeiktqq)N&J?MSQLffQ{Q$Fs8)Zp3+VeruW!xSr9 zAmQq-d+dKEfk2)@-%wX?<@Rx04aee`?eQwHq>Z1bXo67TIYS%$(@?hQwI50(O7h>f zs_FLY9x$>FLELAqC+_e8l-|^NL~uIqnUb{9k@NHowIt9+=uy+jJ#$D>0B+0}VNWS@Mh^kX-nDjf~^rikb7Zy9b*p zzNUZ6j0>AN3akko9rHg)V3_)T)3RRql3y9jDB$G&_05~y2Kn|}BtS16fuyCRlFY*X z8H}H8lnGr}safW&H(3K#4?wz<@xS`5f9sEil{574q()px75akY(5W=T-v1N(@HeyX zzti)Wi+w^WTTMbDx_0!3jkyE(!f0=8oFTe|u#0ok#?-n4T9kX3H~BwhRqFvzEy?6Q z_2$X~V#%KW8%y?wHutS=ItdPUvHE+AAcK#Ya^|*sgIfRh6#k3gXIJk3|C`|+0)F^v zGL8Vlz0BeS%zX9xHd6OYlZQV2x**TX%RMWT>o621ClVimZ?4a8?MwJOZKJJ@@^||t 
z6tNf`d-#0n|HOQMp3{U3g_u{SbH+a!9T|CbHH1ELe~sDO`v&kkr^Xn z+~xd-KN&dm{ON)xwy0lIJHF>)xH+50G(HZ8%zj_VS5 z$@Yge;kTwm(VXq@7tE)^UnFk!NH><*(dOJfb!u%gXvjWL0#to5eJ(yS`?l|b;A#gT z?f!*P|9X1+^B_8|3c4Tc4d@f;PLK7;1SZFU5RhrKdmeyD8Dvnq^Z*G+TwA|53s^;u z5essM*e?mutvVJO7}TYUzo~tGZ9aM@3f|oT*T&o%D7@M9%aZiPyjf!MKNb7$>2BiYc zy@rzx#q9B2Qb%Qz6T|g8cI*+UI<_nvbk1=osxAO}P{c|2d5{z)qQ}r9_V<-@Kl)KI z#Ltdr00G{&b9>MvC1LEG$LM^WEVJ7xqMRys{=yp6`?oXb7CjP&YI_tfUu{6!+1!bC z`(-A_XLD6I+(t${!abz=B6>nYA5bRZX5>6U3YGj7bABi19#}Bhs`SW0E8>)r@aL!< zB$ort5Ya4Y5E!=}SA1~vBlu1%99aEyU%z|*R_DvDR8ycZR)G(`tOsIQ^n$}Zj@|>S z``*nI>ysUy1u4Y_e*x-O%l;7b&gAVNEXDDSd*OjwheYOqRLd9?LA4xobavCRrv{ah zLK^}#@vN>oxl5GSienO^%Z zg=Fn%2Vv#E35e4MP7D4jjZ)#>h=RqG-xMNl8t~fQLs}40fs92_7_7H z`<{n8NzjGFWsjDixFy*U)Pn!%;l3p5QA5r%bk>}=H!p?-uGV>u$g7?z9TRqvhhP+r z^HOLSM?!EGev7iUPY>>|Mj*{^n>66NNvcE9&_jiW;I_q1VM0-=tWKcu>U749js0}I z*@ga*eM%?Ky_1VpeAoROxLB5G<3w-He6Uf*9W<`idYuIr(k||#7GpEsqa^uw?_XYc zgi5(UEDQ*M9yV}yXn_CDaR=PQn9x^`ViLgQqlDs2#wfTSy9RnjZk?^CtM%CbE3?Uh zzgmSq{x_&rK55pJZo0yrD3U$UtPi(cN9Pd;BO$DX%^=YM8uU!eta6NaER>iN=NsktQ#iJn6q|;vv&#q7^}pK`xNIh50Rd$3uMA2|r_# z>WUJ^dPfCTa-ASV$I(+$z;H5fC|81)@A)3DM&j7DyvPrZSBf6rPBpp&Og(`~OUi!M zy+3d73(OMGn?=`KNG0IJEM8@Ix%ha$PE<)t2QA2eq3Cki_vYruRJyDTtI?q{&KL^F|5~RAEViDR(Z| zPRweaK{)JwDt_G<#fZMjdAVlj^=>TQHWR8$;>xOvW;u)U-jO!y`c*FrR*aa)8~uN@ z04;Dg)?X5L=+X-aaa{9KDKYR^T)h8C{%&JyL%Lw^lMW=!tIYAPJq^w9c1ys6OyeRBX{&=-WNHy zM)R{11$qaEEhuE}iH$RH>cw1mjZQYvAAqFg8{Zo7k(#X-zmaH6;6TvGg|pm!1s)Hk ziR}=STy5TQCezy8a(&cTNs12RWr2i)O2YPn!nzaIX)ZA&%{zIw=P@mDS|Ji*9!uyR zgMQpZA4ZAz(-$Wr|&=2XY!A-4uCA;f@R2pwLXj;#Y$uY&)qUgasx05np&o#*@V zw268{jbv(xm!1}mdhD`v=eFpbc>o%@_Qo)ZlwUanbl)m96duUvEiied=N>txlcuvW zE|i1yd&eHn0jS*%8S8W1?g0TNOpgd3>vn)A8L1JNrety-;0I{?wD_-$-=#?cvPe|R zS^L;*^}`s^v&)3no>^5?G*SZQMzAdgO;Q#})ssE>I_KD6xQulhJ0+1W)O0TdK4WJ$ zgGQQt!}^@w=_-T^v)}r_0k_Mgk*dwoNZ};GmIKc${KL$)(=|XC&Pggr^~c&y;BR8e z0irj7qrjIpIqh+p8bA#Ae9wK1r}N6iBp$I3%%5T2qh#)Cg&kk1#g2I?0|J6X5+!b; zEh}{q2xM>x_SgIG{NgCXS#$QgpVa|9Xv^CHll#~8uc^)=KqPx-sV|BLTv!=sNg-N9 zUMXUL=}8M~-;XptBJeUo?H=m-9*mrc8CA$EwXn-4?|UqEzDFih5?urU3A>Ij^a8;qc08BP>o~t9p5(>V1i|A`ol^N@fGjRL~ZcP`K3q@YgeP# zK)*}aZ9n!#pTBDqtzPDG#5@lYO{;X%6PB-d@jGP<=3qk1#Uu3UaCe$H{< zDx&C<$ff|lG6hxw8j;{*NQoisJXT7-q-pdNqrlt~xGIjfM?oKn>vub}SfJWA{!pV7 zyu^70VW`rc?OGVsM+8t}eYxLFfH;cKqOICfc~z4hj@knnmG{fB^2l8o%Hj^W_w}!5 z737WQmwIF9)u;pRZCn@o!Ouz;UzF~!$9wDV$^V2%C<};I0A^CF|M=deJWf0LBKu~> zs+hW1A=Xd4anOPNV9$Et`r8X8q(3xcnX6(QrUSfn^ZAdXEk2#UNp_yfA5&_K$i~Gm&)fWv3B^AcG@{+*Xf7v1Vr>)L8yVM{QG(KtcMidAOoVWp|V$(we1C- zye2|VUTnW}9+~`=gY$XokMR7MIlqXr6L~k1KD@$UVjkG+Z#_vsH+yAZpOGi>%*`%z zVd0$Y+gBx_KBgSPyRLxO=VEJmXwb~2!}ZhHDyw}>CI=wPn3jDpp@Sw?Yd6ILse_+^ zdJ!q=vgjRRXy1K5Hxaa&tahw=*s5iifclxDNeL)H@NkOJd^P09uC#CH{M5J3o=Hy1 zSTDE+fq;8V%v+uQLD=eLiJaFE`Ah4!x^0JoI1W5^`TT`Em<}pic`0cvIQg=@h*33p z$-|Z1@#B5wS zB1yeL>Z?VhYA9Fka|5kUi#q(3iFv@`t|kdg&}GCPK%(XkD=>#XwcDDkuSLA?2tAD$v?%05!|4IN z?4GThh2tGO>4qtB^XlLV+5JvCyq2)=6)s!P~vP3BEhnVIRHI9K1u)jAFBj zG5KK@48HVUj}vaIhfWbJan7|a^ZaI7#Bykx`91AKVyr40nb5 z+1YkZ4to@i5lE7Sb2+n|s|ZjJ0^^WTd%#Mx4uV(E77c_iI;V9*zh3*uOPk7BQu0;j zT`&~>4A4%mz53q}EFc^XGH+WMEy4kEfXINiqYCcVeoVX@X6R>H}?-AwYEl^QxQVwN&D(c(#L6%QiI z%YX|_x(O}$VA($CiDU%9oRYI3-WwmzV!xAfqO8RfeQ5V`Sq!+2LE0;cS}_n+-FRc5 za?t@EW!#9%9h zg^HZPbecAR?r>wYJzSeP?182~_fX25DrDDdIPRqJxZ__S_jc;r3$pCG+i$YBZZ94! 
zSO{Y7%aX!)F3GKMs!S&~DjY3;;GExAI=%b+YJ1V*=4DLHlTI$lRX*=#iNzte`@kWo zP29U$aIH;F=0o_@Vq;vD$t&aJhTS9mtMLVVE-|3*3C_b8RflCMAU`U~`M>K~ZIAoA z&*Y~o_Y-oZQfZs*cXORkqq#Y|A@Zp0ip=8-63hWlA_FuSs?f69ZL%b<@bk-O7VeLK ze^HT+yOc26{hA(X^Y_01ZN;)vtR|vYy~8+^829??T!gl>w~9m2P2Jx;;>F9=lL zMi6qMy=IJ6uDfO-B#kOBw}h{9lpEe>LeNOf2rj1@tYe;5Qf4J*|Jqx|#+V^G6{@D& z*9$)C6KS`I-Q5<{q*dY5h#&~qy43qlm z9v~d8m$Fy4*SLcr=iTTl7gj17$*(jiz@;`U(*^hxSR>SosHkXnw!xB9VU*e@PdyYv zk#GGg6i)bqMdY>!*#xk_E#+~cNY~Z2skDjv?#Qaxer&$}RcQ`)_E}afFsGt{QSF1f z=PbT2Y?dmHBqN+WeAeP!aE*><>*9VNABU#uC8_CWuV2(bsjzj@kNrxYHXlYhPgRK; z*ngiIpa$@cN9(tA>~46zN=e2At4ohMJC9~(b7VVWA{Q3Q%DwO7!YK>87LCK-#MUfk zZP#wWU*!Q8;&5bbO?djgvGbVLdJ4gxSTj+V{tN5NgWoBfM7PGk-?>go-R^0Bapa+@ zb4P}X#?+KL@lM`gawLV&izPD|yWqYdA$dYRANOpt+&p#{(13(asp(>z-P1tWMs7E= zEIl**!uk~lJG(*hgBQ&95(5)}70#M~P`;wQ!B(_yJbg?Fe+KeG)~%~}*+?Ld7K)8ye1l-tYFI&bM+R1L8WTn>ji zc=mw2zOnMs2O7ZsW{XR*^p7Xb^pauSlbw2EoZBhS`MGoe#(a4zaWcA2ddbaG(%lni zPL|oJ0_oX8XkGo9qpQ3bjglG~{Oz5!^bnm*rZeKNy+Ar{_1FL1EpiHzQCd#xa?Tsd z?(z-5JnLmR;L+YI$z>VcGP)O0&<*=k%6f2mXTi+neToc_{5lt^@`7;Y-;5kMW(|;3 z&FJ3{uLDA5MFgRbJd9T*0@VdhUmJZFrFgEMzDq&7?c{L&Lcp`NcwXH7 zq^WC|{zjY-OIM~rI<)U2Z-Acbe8o_784iW&>Y~>wo*QTf${RE%_WJ~ccraABQec}L zxqpV#v&}!$eYo#)BR5cI1Gm*ij{$6_K0k>@xt6i)2*=_ppey$l6mgWC$zh()Hp`YY zm3*g{K<)lZ1L-###v?Wcbv64k@Nuv8iwGeM`RtNxRiNf?McOk`j%VW?zV#jmHSrWW z`g#7!J+qIhrbC=}-Oc-fhV|dbyp!DDN}G?eJSS+Hr05sSAC@nZd|@!%vvmtVVn}9y zM2@L|NAFYGpd{eK)OgbJ%4tt;YnA z{)C9&>~GCRJd(z!=9z^iZ=Pg@Un8NqP147z0_af%2chGUR{!UFqq(xMARGI3n2vr? z5NsLwS}Xm0(}N9xbaSC0Twp;As5RSwb1`HNx zq(cKL>`Cp0rntgJPBd*NjWLiGbIuY?opY_4N4>ki*LRU~SCLD}<0H zT9LZ(?=p%Di;pB_r$66$u0QjA_MFp!tk23HmUm0SIWbdrd!`@UK?w@U9&flON1b`Y!NMDAg zOQWQEm~~aPbOOI-`-~7yLrZ;prVI@Rf$GK7mAb zC57x~q+Bq!&{-WWc;%G0_Iogg9FXXNyT}TObdiM~K8>R}#d_d8(VN<|1?y)|S3kb4 zXq&?cDX`Co|AL5Vf~nB9A|I;3eXWO1K)AFqX3g`rr@}p}7cBO}a9*t{%32L<+Uwwd zgL22`gM)u!w}~lTo(5MUI`d_lzZcVEKA|CO{_GqtBhFeo{ev$LfLahPgD;mH@!OW$ zd|9+v8q%BN&t@phEKaEtTKu0cYS2BAn=!^byDppHxzQ>Ih#Wo0VQL>cR$IMSmwLVK)?`Ot-DbrfjjUN#tq&!^I|KcK1L&H01&^{ z5Y0FczEZ5~`N0BHPVoMron;cn3}c$8h6D9SOv+ z;H1~FZ(ru)0oI5CdcO$zo``=!re`FR+T-sAI>Q38|l2oKcEmt?oOGZ%uj<@MY_QDlJ90~0^9{-?ktEh+8v;9C>r za$_zPpu)H`OCssPq~a*=dq~hPnhB(P7u;3WN{@t1d7oT}7_;#?Q8Vl+El3xCXHiB4v6KR0$m>$pjj#k!6L~39N#I2bXWDl# zaDmAQpr3{OdX(^4H3+(Sb!va$FqDi}{NbiK;w>{TQsM0cUx@MY#^qB?L4k^bV$#BM z@+Gkvybr~wE@7{b^YMAZ-x7L{>c7=kDY8YF0xM7T5s8sMKp(R;`gkQWF#6ci5#|%J z(c?4Tv7A#j_xVvMqI7>El%s;Z!JA{AbDDS_`IqK^Wr6W*smI8UHweBuy%U-ZVrZ=A zGu2obZgwZbe0xq0xms~Uq%%_)=sNJ15|oEOQh$?Js?!1MDmL=$t1vWCFm>>%sYbZQ zo(n`44}3B4dUUoX63mf6o)A@~Y{oscRQbe?i|}xU;~H5rq87!O$`($x)^-{FkH8gIih4!Rz2xF$b}D zg!AG7bf@G>uHv%SYf6bhC+O%+WR%yxs7IyOy!pb_Irmjdc>y0rvsi*U_tpe>@EjT9 zd-JiyjG!X+wOU~}XT|`a>Q>SBnJVR)=&igsCK@@`mhdySWxh-3Md!s=j3V}+ofj@o zzv`q4h|y1&D2#|Sxs93P<22$5K|yOaO5#3)QPhP!Q(B)s0PcU*gD(N|l1e5OKp%_a zLO_?2XVfGUDhH)kqJ7> zHQnuO!I!zq)vfI|64u(106E2*Dnr{)*Uv+fl@=D$OY4jq>f6&}OWufOwFmJb9Mtp3)(pUbhFm#n{0@hYf?81Dp!jqTrtQ<2;}ups+znM;;WT#M9}7#34>Ca=aq@b(mHREd1D0{Iyae zfKEKEdic>onTa1UQ#ugsanImJ=b(Mz z6KOV#DXzw|JHQ*bedMmwTqZaZjJZLxY#HYF-UoP7S*HAXQ(=8_?S2OOqd4H=?MpN) zUPA(!1?zeD>mNDzxOv{4Q_3uhWIz24kaQ?*OXf^1-`aNB(V)B^2NB{4?$dhFF$(w^ zTt^*^I!J-uyVtuCdmpxpeGm%2z($|$zZ1~*R=O|N=3S6^St(gm=$h-GmOc{#ik?zaOsWWiXGVwwH0b%owIlVy>^SOi z#D;m8Y-+H2@1PNpYf;bQDUJE2J@`?oRbQ9S&Bny&7^lOr zKLrWBy1bU|G%QY*r;UyrTa%PF;Cei#B!ds z?t=*ET6rFg1Qo;rWTig0XD3fPO9_BUJl;tWozD;9U=so2_S`AgFB#Ib4_p4UYHeMS zv?4)gU+#FJ{n3@I;)!jozzT6vce^FFN8~gOwHGo)g)3~W>^%5e$Y_MGJAoZhG-kU@ zMy=aFrJYRHQi{eVwfn}pBoEcdkYY~^uhmlBA_8Gf<`eU+@P>2D=66g_0`IJGlOUq` z5(^u`Zx1F>n+v0rfl1F@U+NOE$f310dJqZTE+bm;!D-w* 
GIT binary patch literal data (binary image payload, not human-readable) omitted.
diff --git a/docs/installation.md b/docs/installation.md index 55e42ad3..8163b74c 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -1,17 +1,17 @@ --- -title: 
Installing the Micro Manager +title: Get the Micro Manager permalink: tooling-micro-manager-installation.html keywords: tooling, macro-micro, two-scale summary: Install the Micro Manager by running `pip install --user micro-manager-precice`. --- -## Installation +## Get the latest Micro Manager release -The Micro Manager is a Python package that can be installed using `pip`. Make sure [preCICE](installation-overview.html) is installed before installing the Micro Manager. The Micro Manager is compatible with preCICE version [2.5.0](https://github.com/precice/precice/releases/tag/v2.5.0). +The Micro Manager can be installed using `pip`. Make sure [preCICE](installation-overview.html) is installed before installing the Micro Manager. The Micro Manager is compatible with preCICE version [2.3.0](https://github.com/precice/precice/releases/tag/v2.3.0) and higher. -### Option 1: Using pip +### Option 1: Install from PyPI -It is recommended to install [micro-manager-precice from PyPI](https://pypi.org/project/micro-manager-precice/) by running +The Micro Manager package has the name [micro-manager-precice](https://pypi.org/project/micro-manager-precice/) on PyPI. To install `micro-manager-precice`, run ```bash pip install --user micro-manager-precice @@ -19,30 +19,44 @@ pip install --user micro-manager-precice Unless already installed, the dependencies will be installed by `pip` during the installation procedure. preCICE itself needs to be installed separately. If you encounter problems in the direct installation, see the [dependencies section](#required-dependencies) below. -### Option 2: Clone this repository and install manually +### Option 2: Install manually #### Required dependencies Ensure that the following dependencies are installed: -* Python 3 or higher -* [preCICE](installation-overview.html) [v2.5.0](https://github.com/precice/precice/releases/tag/v2.5.0) +* Python 3 +* [preCICE](installation-overview.html) [v2.3.0](https://github.com/precice/precice/releases/tag/v2.3.0) or higher * [pyprecice: Python language bindings for preCICE](installation-bindings-python.html) * [numpy](https://numpy.org/install/) * [mpi4py](https://mpi4py.readthedocs.io/en/stable/install.html) -#### Build and install the Manager using pip +#### Clone this repository -After cloning this repository, go to the directory `micro-manager/` and run +```bash +git clone https://github.com/precice/micro-manager.git +``` + +#### Build manually using pip + +Go to the directory `micro-manager/` and run ```bash pip install --user . ``` -#### Build and install the Manager using Python +#### Build manually using Python -After cloning this repository, go to the project directory `micro-manager/` and run +Go to the project directory `micro-manager/` and run ```bash python setup.py install --user ``` + +## Get the latest development version + +If you want to use the latest development version of the Micro Manager, clone the develop[https://github.com/precice/micro-manager/tree/develop] branch and then [build manually using pip](#build-manually-using-pip). + +## Next steps + +After successfully installing the Micro Manager, proceed to [preparing your micro simulation for the coupling](tooling-micro-manager-prepare-micro-simulation.html). 
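After the installation steps above, a quick import check can confirm that the package and its required dependencies are visible to Python. This is only a sketch: it assumes the installed package is importable as `micro_manager` and that the preCICE Python bindings are importable as `precice`; adjust the names if your setup differs.

```python
# Sanity check (sketch): verify that the Micro Manager and its main dependencies
# can be found by the Python interpreter that will run it.
import importlib.util

for module in ("micro_manager", "precice", "numpy", "mpi4py"):
    found = importlib.util.find_spec(module) is not None
    print(f"{module}: {'found' if found else 'missing'}")
```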
diff --git a/docs/micro-simulation-convert-to-library.md b/docs/micro-simulation-convert-to-library.md index c4005104..f8900f1b 100644 --- a/docs/micro-simulation-convert-to-library.md +++ b/docs/micro-simulation-convert-to-library.md @@ -1,20 +1,21 @@ --- -title: Convert Your Micro Simulation to Library -permalink: tooling-micro-manager-micro-simulation-callable-library.html +title: Prepare micro simulation +permalink: tooling-micro-manager-prepare-micro-simulation.html keywords: tooling, macro-micro, two-scale -summary: You need to create an Python-importable class from your micro simulation code. +summary: Create an Python-importable class from your micro simulation code. --- -## Steps to convert micro simulation code to a callable library +The Micro Manager requires that the micro simulation code be in a predefined class structure. As the Micro Manager is written in Python, micro simulation codes written in Python are the easiest to prepare. For micro simulation codes not written in Python, look at the [C++ micro simulation section](#create-an-python-importable-class-from-your-micro-simulation-code-written-in-c) below. -The Micro Manager requires a specific structure of the micro simulation code. Create a class that can be called from Python with the structure given below. The docstring of each function gives information on what it should do and what its input and output should be. +{% note %} The Micro Manager [solver dummy examples](https://github.com/precice/micro-manager/tree/develop/examples) are minimal code examples with the predefined class structure. We recommend copying the appropriate example and modifying it with your micro simulation code to create a Python-importable class. {% endnote %} + +Restructure your micro simulation code into a Python class with the structure given below. The docstring of each function gives information on what it should do and what its input and output should be. ```python class MicroSimulation: # Name is fixed - def __init__(self): """ - Constructor of class MicroSimulation. Initialize all class member variables here. + Constructor of class MicroSimulation. """ def initialize(self) -> dict: @@ -24,24 +25,24 @@ class MicroSimulation: # Name is fixed Returns ------- data : dict - Python dictionary with keys as names of micro data and values as the data at the initial condition + Python dictionary with names of micro data as keys and the data as values at the initial condition """ def solve(self, macro_data, dt) -> dict: """ - Solve one time step of the micro simulation or for steady-state problems: solve until steady state is reached. + Solve one time step of the micro simulation for transient problems or solve until steady state for steady-state problems. Parameters ---------- macro_data : dict - Dictionary with keys as names of macro data and values as the data + Dictionary with names of macro data as keys and the data as values dt : float Time step size Returns ------- micro_data : dict - Dictionary with keys as names of micro data and values as the updated micro data + Dictionary with names of micro data as keys and the updated micro data a values """ def save_checkpoint(self): @@ -63,30 +64,24 @@ class MicroSimulation: # Name is fixed """ ``` -Skeleton dummy code of a sample MicroSimulation class can be found in the [examples/](https://github.com/precice/micro-manager/tree/main/examples/) directory. 
There are two variants - -* `examples/python-dummy/`: Dummy micro simulation written in Python -* `examples/cpp-dummy/`: Dummy micro simulation written in C++ and compiled to a Python library using [pybind11](https://pybind11.readthedocs.io/en/stable/) - -### Convert your micro simulation written in C++ to a callable library +A dummy code of a sample MicroSimulation class can be found in the [examples/python-dummy/micro_dummy.py](https://github.com/precice/micro-manager/blob/develop/examples/python-dummy/micro_dummy.py) directory. -A C++ dummy micro simulation is provided in [`examples/cpp-dummy/`](github.com/precice/micro-manager/tree/main/examples/cpp-dummy). -It uses [pybind11](https://pybind11.readthedocs.io/en/stable/) to compile the C++ code into a library which can be imported in Python. If the micro simulation in C++, [install pybind11](https://pybind11.readthedocs.io/en/stable/installing.html). +## Create an Python-importable class from your micro simulation code written in C++ -Creating a new micro simulation in C++ has the following steps +A dummy C++ dummy micro simulation code having a Python-importable class structure is provided in [`examples/cpp-dummy/micro_cpp_dummy.cpp`](https://github.com/precice/micro-manager/blob/develop/examples/cpp-dummy/micro_cpp_dummy.cpp). It uses [pybind11](https://pybind11.readthedocs.io/en/stable/) to enable control and use from Python. Restructuring a C++ micro simulation code has the following steps -1. Create a C++ class which implements the functions given [above](#steps-to-convert-micro-simulation-code-to-a-callable-library). +1. Create a C++ class which implements the functions given in the code snippet above. The `solve()` function should have the following signature: ```cpp py::dict MicroSimulation::solve(py::dict macro_data, double dt) ``` - [`py::dict`](https://pybind11.readthedocs.io/en/stable/advanced/pycpp/object.html?#instantiating-compound-python-types-from-c) is a Python dictionary which can be used to pass data between Python and C++. You need to cast the data to the correct type before using it in C++ and vice versa. An example is given in the dummy micro simulation. + [`py::dict`](https://pybind11.readthedocs.io/en/stable/advanced/pycpp/object.html?#instantiating-compound-python-types-from-c) is a Python dictionary which can be used to pass data between Python and C++. Cast the data to the correct type before using it in C++ and vice versa. 2. Export the C++ class to Python using pybind11. Follow the instructions to exporting classes in the [pybind11 documentation](https://pybind11.readthedocs.io/en/stable/classes.html) or read their [first steps](https://pybind11.readthedocs.io/en/stable/basics.html) to get started. -3. Compile the C++ library including pybind11. For the solverdummy, run +3. Compile the C++ library including pybind11. For example, for the solverdummy, the command is ```bash c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) @@ -97,4 +92,4 @@ The `solve()` function should have the following signature: ## Next Steps -With your code converted to a library, you can now [create a coupling configuration](tooling-micro-manager-usage-configuration.html). +After restructuring your micro simulation code into a Python-importable class structure, [configure the Micro Manager](tooling-micro-manager-usage-configuration.html). 
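The interface documented above is enough to write a complete, if trivial, micro simulation. The following is a minimal sketch of such a class; the data names `macro-scalar-data` and `micro-scalar-data` are illustrative placeholders and must match the data names used in your preCICE and Micro Manager configuration.

```python
class MicroSimulation:  # Name is fixed
    def __init__(self):
        self._state = 0.0       # toy internal state of the micro problem
        self._checkpoint = 0.0  # storage for implicit coupling checkpoints

    def initialize(self) -> dict:
        # Return the micro data at the initial condition.
        return {"micro-scalar-data": self._state}

    def solve(self, macro_data: dict, dt: float) -> dict:
        # Toy update: relax the micro state towards the incoming macro value.
        self._state += dt * (macro_data["macro-scalar-data"] - self._state)
        return {"micro-scalar-data": self._state}

    def save_checkpoint(self):
        self._checkpoint = self._state

    def reload_checkpoint(self):
        self._state = self._checkpoint
```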
diff --git a/docs/running.md b/docs/running.md index 44c4dd72..f3886595 100644 --- a/docs/running.md +++ b/docs/running.md @@ -1,5 +1,5 @@ --- -title: Running the Micro Manager +title: Run the Micro Manager permalink: tooling-micro-manager-running.html keywords: tooling, macro-micro, two-scale summary: Run the Micro Manager from the terminal with a configuration file as input argument or from a Python script. @@ -28,3 +28,9 @@ The Micro Manager can also be run in parallel, using the same script as stated a ```bash mpirun -n python3 run-micro-manager.py ``` + +A script running the Micro Manager can also be executed in parallel + +```bash +mpirun -n run-micro-manager.py +``` diff --git a/examples/README.md b/examples/README.md index 88744b50..98af528f 100644 --- a/examples/README.md +++ b/examples/README.md @@ -3,6 +3,7 @@ The `solverdummies` are minimal working examples for using the preCICE Micro Manager with different languages. At the moment, there are examples for Python, and C++. They can be coupled with any other solver, for example the `macro-dummy.py` in this directory. ## Python + To run the Python solverdummies, run the following commands in the `examples/` directory in **two different terminals**: ```bash @@ -13,6 +14,7 @@ python python-dummy/run_micro_manager.py Note that running `micro_manager micro-manager-config.json` from the terminal will not work, as the path in the configuration file is relative to the current working directory. See [#36](https://github.com/precice/micro-manager/issues/36) for more information. ## C++ + The C++ solverdummies have to be compiled first using [`pybind11`](https://pybind11.readthedocs.io/en/stable/index.html). To do so, install `pybind11` using `pip`: ```bash @@ -24,11 +26,13 @@ Then, run the following commands in the `cpp-dummy` directory: ```bash c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix) ``` +

Explanation The command above compiles the C++ solverdummy and creates a shared library that can be imported from python using `pybind11`. -- The `$(python3 -m pybind11 --includes)` part is necessary to include the correct header files for `pybind11`. + +- The `$(python3 -m pybind11 --includes)` part is necessary to include the correct header files for `pybind11`. - The `$(python3-config --extension-suffix)` part is necessary to create the correct file extension for the shared library. For more information, see the [pybind11 documentation](https://pybind11.readthedocs.io/en/stable/compiling.html#building-manually). - If you have multiple versions of Python installed, you might have to replace `python3-config` with `python3.8-config` or similar. From 1ca76820c642d3373e3e79ba67774413fc2acc67 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Sat, 3 Jun 2023 21:20:28 +0200 Subject: [PATCH 54/87] Change the config parameter 'axiswise_ranks' to 'decomposition' --- docs/configuration.md | 2 +- micro_manager/config.py | 2 +- .../test_unit_cube_dummy/micro-manager-config-parallel-1.json | 2 +- .../test_unit_cube_dummy/micro-manager-config-parallel-2.json | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index 01c46f82..0309f2c7 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -117,7 +117,7 @@ Parameter | Description `coarsening_constant` | Coarsening constant $$ C_c $$, set as $$ C_c < 1 $$. `refining_constant` | Refining constant $$ C_r $$, set as $$ C_r >= 0 $$. `every_implicit_iteration` | If True, adaptivity is calculated in every implicit iteration.
If False, adaptivity is calculated once at the start of the time window and then reused in every implicit time iteration. -`adaptivity_similarity_measure`| Similarity measure to be used for adaptivity. Can be either `L1`, `L2`, `L1rel` or `L2rel`. By default, `L1` is used. The `rel` variants calculate the respective relative norms. This parameter is *optional*. +`similarity_measure`| Similarity measure to be used for adaptivity. Can be either `L1`, `L2`, `L1rel` or `L2rel`. By default, `L1` is used. The `rel` variants calculate the respective relative norms. This parameter is *optional*. Example of adaptivity configuration diff --git a/micro_manager/config.py b/micro_manager/config.py index a7e447aa..44d8b35f 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -96,7 +96,7 @@ def read_json(self, config_filename): self._macro_domain_bounds = data["simulation_params"]["macro_domain_bounds"] try: - self._ranks_per_axis = data["simulation_params"]["axiswise_ranks"] + self._ranks_per_axis = data["simulation_params"]["decomposition"] except BaseException: print("Domain decomposition is not specified, so the Micro Manager will expect to be run in serial.") diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json index 8e71edcf..bcbe34b6 100644 --- a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json @@ -8,7 +8,7 @@ }, "simulation_params": { "macro_domain_bounds": [0, 1, 0, 1, 0, 1], - "axiswise_ranks": [1, 1, 2] + "decomposition": [1, 1, 2] }, "diagnostics": { "output_micro_sim_solve_time": "True" diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json index 487ae129..63b5c1f0 100644 --- a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json @@ -8,7 +8,7 @@ }, "simulation_params": { "macro_domain_bounds": [0, 1, 0, 1, 0, 1], - "axiswise_ranks": [1, 2, 3] + "decomposition": [1, 2, 3] }, "diagnostics": { "output_micro_sim_solve_time": "True" From 98c7996ef76c52be6b1b4e3f0f454c989ac9dbc8 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Mon, 5 Jun 2023 10:41:45 +0200 Subject: [PATCH 55/87] Fixing links in the documentation and small corrections --- docs/README.md | 2 +- docs/configuration.md | 6 +++--- docs/running.md | 6 ------ 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/docs/README.md b/docs/README.md index 849d2f79..e3fbb69e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -24,6 +24,6 @@ The Micro Manager couples many micro simulations with one macro simulation. This To use the Micro Manager for a macro-micro coupling, your micro simulation code needs to be in a library format with a specific class name and functions with specific names. For a macro-micro coupled problem, the macro simulation code is coupled to preCICE directly. The section [couple your code](couple-your-code-overview.html) of the preCICE documentation gives more details on coupling existing codes. 
To setup a macro-micro coupled simulation using the Micro Manager, follow these steps: - [Installation](tooling-micro-manager-installation.html) -- [Preparing micro simulation](tooling-micro-manager-micro-simulation-callable-library.html) +- [Preparing micro simulation](tooling-micro-manager-prepare-micro-simulation.html) - [Configuration](tooling-micro-manager-configuration.html) - [Running](tooling-micro-manager-running.html) diff --git a/docs/configuration.md b/docs/configuration.md index 0309f2c7..bc237024 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -27,7 +27,7 @@ The Micro Manager is configured with a JSON file. An example configuration file } ``` -This example configuration file is in [`examples/micro-manager-config.json`](https://github.com/precice/micro-manager/tree/main/examples/micro-manager-config.json). +This example configuration file is in [`examples/micro-manager-config.json`](https://github.com/precice/micro-manager/tree/develop/examples/micro-manager-config.json). The path to the file containing the Python importable micro simulation class is specified in the `micro_file_name` parameter. If the file is not in the working directory, give the relative path. @@ -86,7 +86,7 @@ If `output_micro_sim_solve_time` is set, add similar entries for the data `micro ## Domain decomposition -The Micro Manager can be run in parallel. For a parallel run, set the desired partitions in each axis by setting the `decomposition` parameter. For example, if the domain is 3D and the decomposition needs to be two partitions in x, one partition in y, and sixteen partitions in forz, the setting is +The Micro Manager can be run in parallel. For a parallel run, set the desired partitions in each axis by setting the `decomposition` parameter. For example, if the domain is 3D and the decomposition needs to be two partitions in x, one partition in y, and sixteen partitions in for z, the setting is ```json "simulation_params": { @@ -103,7 +103,7 @@ The Micro Manager can adaptively control micro simulations. The adaptivity strat 1. Redeker, Magnus & Eck, Christof. (2013). A fast and accurate adaptive solution strategy for two-scale models with continuous inter-scale dependencies. Journal of Computational Physics. 240. 268-283. [10.1016/j.jcp.2012.12.025](https://doi.org/10.1016/j.jcp.2012.12.025). -2. Bastidas, Manuela & Bringedal, Carina & Pop, Iuliu. (2021). A two-scale iterative scheme for a phase-field model for precipitation and dissolution in porous media. Applied Mathematics and Computation. 396. 125933. [10.1016/j.amc.2020.125933](https://doi.org/10.1016/j.amc.2020.125933). +2. Bastidas, Manuela & Bringedal, Carina & Pop, Iuliu Sorin. (2021). A two-scale iterative scheme for a phase-field model for precipitation and dissolution in porous media. Applied Mathematics and Computation. 396. 125933. [10.1016/j.amc.2020.125933](https://doi.org/10.1016/j.amc.2020.125933). All the adaptivity parameters are chosen from the second publication. 
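Because the parameter was renamed from `axiswise_ranks` to `decomposition`, configuration files written for earlier versions need a one-line update. A small migration sketch follows; it is a hypothetical helper, not part of the Micro Manager, and `my-config.json` is a placeholder path.

```python
# Sketch: rename the old "axiswise_ranks" key to the new "decomposition" key
# in an existing Micro Manager configuration file.
import json

path = "my-config.json"  # placeholder: path to your configuration file
with open(path) as f:
    config = json.load(f)

simulation_params = config.get("simulation_params", {})
if "axiswise_ranks" in simulation_params:
    simulation_params["decomposition"] = simulation_params.pop("axiswise_ranks")

with open(path, "w") as f:
    json.dump(config, f, indent=2)
```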
diff --git a/docs/running.md b/docs/running.md index f3886595..a1ba74ca 100644 --- a/docs/running.md +++ b/docs/running.md @@ -28,9 +28,3 @@ The Micro Manager can also be run in parallel, using the same script as stated a ```bash mpirun -n python3 run-micro-manager.py ``` - -A script running the Micro Manager can also be executed in parallel - -```bash -mpirun -n run-micro-manager.py -``` From 035d8f5e6f43cec666ed5bbf0610eb879b425eaa Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Mon, 5 Jun 2023 11:13:04 +0200 Subject: [PATCH 56/87] Fix link to develop branch in installation documentation --- docs/installation.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/installation.md b/docs/installation.md index 8163b74c..dd66d970 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -55,7 +55,7 @@ python setup.py install --user ## Get the latest development version -If you want to use the latest development version of the Micro Manager, clone the develop[https://github.com/precice/micro-manager/tree/develop] branch and then [build manually using pip](#build-manually-using-pip). +If you want to use the latest development version of the Micro Manager, clone the [develop](https://github.com/precice/micro-manager/tree/develop) branch and then [build manually using pip](#build-manually-using-pip). ## Next steps From 4d139452dde5a3b24ad97820a8b4b097a5c0e13e Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 7 Jun 2023 15:19:35 +0200 Subject: [PATCH 57/87] Fixing broken links in documentation --- docs/configuration.md | 2 +- docs/installation.md | 14 ++++++-------- docs/micro-simulation-convert-to-library.md | 4 ++-- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/docs/configuration.md b/docs/configuration.md index bc237024..50282950 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -161,6 +161,6 @@ The Micro Manager uses the output functionality of preCICE, hence these data set ``` -## Next Steps +## Next step After creating a configuration file you are ready to [run the Micro Manager](tooling-micro-manager-running.html). diff --git a/docs/installation.md b/docs/installation.md index dd66d970..03f3f243 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -31,23 +31,21 @@ Ensure that the following dependencies are installed: * [numpy](https://numpy.org/install/) * [mpi4py](https://mpi4py.readthedocs.io/en/stable/install.html) -#### Clone this repository +#### Clone the Micro Manager ```bash git clone https://github.com/precice/micro-manager.git ``` -#### Build manually using pip +#### Install manually -Go to the directory `micro-manager/` and run +To install using `pip`, go to the directory `micro-manager/` and run ```bash pip install --user . ``` -#### Build manually using Python - -Go to the project directory `micro-manager/` and run +To install using Python, go to the project directory `micro-manager/` and run ```bash python setup.py install --user @@ -55,8 +53,8 @@ python setup.py install --user ## Get the latest development version -If you want to use the latest development version of the Micro Manager, clone the [develop](https://github.com/precice/micro-manager/tree/develop) branch and then [build manually using pip](#build-manually-using-pip). +If you want to use the latest development version of the Micro Manager, clone the [develop](https://github.com/precice/micro-manager/tree/develop) branch and then [build manually using pip](#install-manually). 
-## Next steps +## Next step After successfully installing the Micro Manager, proceed to [preparing your micro simulation for the coupling](tooling-micro-manager-prepare-micro-simulation.html). diff --git a/docs/micro-simulation-convert-to-library.md b/docs/micro-simulation-convert-to-library.md index f8900f1b..7859adfe 100644 --- a/docs/micro-simulation-convert-to-library.md +++ b/docs/micro-simulation-convert-to-library.md @@ -90,6 +90,6 @@ The `solve()` function should have the following signature: This will create a shared library `micro_dummy.so` which can be directly imported in Python. For more information on compiling C++ libraries, see the [pybind11 documentation](https://pybind11.readthedocs.io/en/stable/compiling.html). -## Next Steps +## Next step -After restructuring your micro simulation code into a Python-importable class structure, [configure the Micro Manager](tooling-micro-manager-usage-configuration.html). +After restructuring your micro simulation code into a Python-importable class structure, [configure the Micro Manager](tooling-micro-manager-configuration.html). From 041495d15351ae63fed123c8037f45aa9305fead Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 14 Jun 2023 10:13:44 +0200 Subject: [PATCH 58/87] Remove adaptivity references from main README as they are confusing here --- README.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/README.md b/README.md index feeb0525..8f1aefad 100644 --- a/README.md +++ b/README.md @@ -8,13 +8,6 @@ The main documentation is on the [preCICE website](https://precice.org/tooling-m Please report any bugs and issues [here](https://github.com/precice/micro-manager/issues) and give us feedback through [one of our community channels](https://precice.org/community-channels.html). -## References - The concept and initial design of the Micro Manager has been discussed in Desai, Ishaan, & Bringedal, Carina & Uekermann, Benjamin. A flexible software approach to simulate two-scale coupled problems. ECCOMAS Congress 2022. [10.23967/eccomas.2022.037](https://doi.org/10.23967/eccomas.2022.037). - -The Micro Manager can adaptively control micro simulations. The adaptivity strategy is taken from two publications - -1. Redeker, Magnus & Eck, Christof. (2013). A fast and accurate adaptive solution strategy for two-scale models with continuous inter-scale dependencies. Journal of Computational Physics. 240. 268-283. [10.1016/j.jcp.2012.12.025](https://doi.org/10.1016/j.jcp.2012.12.025). -2. Bastidas, Manuela & Bringedal, Carina & Pop, Iuliu. (2021). A two-scale iterative scheme for a phase-field model for precipitation and dissolution in porous media. Applied Mathematics and Computation. 396. 125933. [10.1016/j.amc.2020.125933](https://doi.org/10.1016/j.amc.2020.125933). From 79d3cb024bea6eac6741e342e5d497e910c24a55 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 16 Jun 2023 13:34:52 +0200 Subject: [PATCH 59/87] Prefix images in documentation with tooling-micro-manager- --- docs/README.md | 2 +- ...g => tooling-micro-manager-manager-solution.png} | Bin 2 files changed, 1 insertion(+), 1 deletion(-) rename docs/images/{ManagerSolution.png => tooling-micro-manager-manager-solution.png} (100%) diff --git a/docs/README.md b/docs/README.md index e3fbb69e..6fee76fc 100644 --- a/docs/README.md +++ b/docs/README.md @@ -9,7 +9,7 @@ summary: A tool to manage many micro simulations and couple them to a macro simu The Micro Manager manages many simulations on a micro scale and couples them to one simulation on a macro scale. 
For the coupling itself, it heavily relies on the coupling library [preCICE](https://precice.org/index.html). -![Micro Manager strategy schematic](images/ManagerSolution.png) +![Micro Manager strategy schematic](images/tooling-micro-manager-manager-solution.png) ## What can it do? diff --git a/docs/images/ManagerSolution.png b/docs/images/tooling-micro-manager-manager-solution.png similarity index 100% rename from docs/images/ManagerSolution.png rename to docs/images/tooling-micro-manager-manager-solution.png From 98d7e1dc01f9016926d2b26a65b44af498c0fa8b Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 16 Jun 2023 13:58:23 +0200 Subject: [PATCH 60/87] Turn off table of contents for the micro simulation conver to library doc page --- docs/micro-simulation-convert-to-library.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/micro-simulation-convert-to-library.md b/docs/micro-simulation-convert-to-library.md index 7859adfe..b582499a 100644 --- a/docs/micro-simulation-convert-to-library.md +++ b/docs/micro-simulation-convert-to-library.md @@ -2,6 +2,7 @@ title: Prepare micro simulation permalink: tooling-micro-manager-prepare-micro-simulation.html keywords: tooling, macro-micro, two-scale +toc: off summary: Create an Python-importable class from your micro simulation code. --- From fd797603c2b07e8ff9db714029d3471598875956 Mon Sep 17 00:00:00 2001 From: Benjamin Uekermann Date: Mon, 19 Jun 2023 14:49:32 +0200 Subject: [PATCH 61/87] Smooth over some documentation (#52) --- README.md | 6 ++---- micro_manager/domain_decomposition.py | 6 +++--- micro_manager/micro_manager.py | 10 +++++----- tests/unit/precice.py | 2 +- 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 8f1aefad..56d5ad73 100644 --- a/README.md +++ b/README.md @@ -2,11 +2,9 @@ A tool to facilitate solving two-scale (macro-micro) coupled problems using the coupling library [preCICE](https://www.precice.org/). -## Start Here +The main documentation is rendered on the [preCICE website](https://precice.org/tooling-micro-manager-overview.html). -The main documentation is on the [preCICE website](https://precice.org/tooling-micro-manager-overview.html). - -Please report any bugs and issues [here](https://github.com/precice/micro-manager/issues) and give us feedback through [one of our community channels](https://precice.org/community-channels.html). +Please report any [issues](https://github.com/precice/micro-manager/issues) and give us feedback through [one of our community channels](https://precice.org/community-channels.html). The concept and initial design of the Micro Manager has been discussed in diff --git a/micro_manager/domain_decomposition.py b/micro_manager/domain_decomposition.py index 41c8c7b5..c1dd61e2 100644 --- a/micro_manager/domain_decomposition.py +++ b/micro_manager/domain_decomposition.py @@ -1,5 +1,5 @@ """ -Functionality to partition the macro domain according to the user provided partitions in each axis +Functionality to partition the macro domain according to partitions in each axis provided by the user """ import numpy as np @@ -21,11 +21,11 @@ def decompose_macro_domain(self, macro_bounds: list, ranks_per_axis: list) -> li macro_bounds : list List containing upper and lower bounds of the macro domain. 
Format in 2D is [x_min, x_max, y_min, y_max] - Format in 2D is [x_min, x_max, y_min, y_max, z_min, z_max] + Format in 3D is [x_min, x_max, y_min, y_max, z_min, z_max] ranks_per_axis : list List containing axis wise ranks for a parallel run Format in 2D is [ranks_x, ranks_y] - Format in 2D is [ranks_x, ranks_y, ranks_z] + Format in 3D is [ranks_x, ranks_y, ranks_z] Returns ------- diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 07dd2bad..74465068 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -118,11 +118,11 @@ def __init__(self, config_file: str) -> None: def initialize(self) -> None: """ This function does the following things: - - If the Micro Manager has been executed in parallel, it decomposes the domain as uniformly as possible. - - Initializes preCICE. - - Gets the macro mesh information from preCICE. - - Creates all micro simulation objects and initializes them if an initialization procedure is available. - - Writes initial data to preCICE. + - Decomposes the domain if the Micro Manager is executed in parallel + - Initializes preCICE + - Gets the macro mesh information from preCICE + - Creates all micro simulation objects and initializes them if an initialization procedure is available + - Writes initial data to preCICE """ # Decompose the macro-domain and set the mesh access region for each # partition in preCICE diff --git a/tests/unit/precice.py b/tests/unit/precice.py index 35d4c5fe..210bf9ba 100644 --- a/tests/unit/precice.py +++ b/tests/unit/precice.py @@ -1,4 +1,4 @@ -# This file mocks pyprecice, the Python bindings for preCICE and is used _only_ for unit testing the Micro Manager. +# This file mocks pyprecice, the Python bindings for preCICE, and is used _only_ for unit testing the Micro Manager. import numpy as np From 80d3292410c3a149a4c7ef70a47c1771421e4a19 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Thu, 6 Jul 2023 15:23:43 +0200 Subject: [PATCH 62/87] Add global variant to adaptivity (#42) * Make a copy of the active sim object which is associated to an inactive sim and put it in the list * Copy list of micro simulations in the association step and return the copied instance * Reduce total time of adaptivity integration test from 10 to 2 * Properly handling copying of active micro sim objects to the associated inactive ones * [WIP] Add option to toggle local or global adaptivity * [WIP] Implement local and global adaptivity in the initialization * [WIP] Handle global adaptivity for data reading * [WIP] Redesigning of the manager for global adaptivity * [WIP] Update micro sim states globally before working on micro sim objects locally * [WIP] Moving functionality to create MicroSimulation class outside of micro_manager.py * [WIP] Split adaptivity into two inherited classes for global and local functionality. 
(only skeleton code) * [WIP] Further refactoring of adaptivity code * [WIP] Streamlining * [WIP] Adding asnychronous receiving for the activation step * Add set methods to MicroSimulation class * Moving hashing in p2p operations to a separate function * Remove unnecessary import of deepcopy * Moving functionality to update active sims into base class and other implementation details of global adaptivity * Further generalization of adaptivity functionality and implementation of global adaptivity * Fix tests * Fixing bugs in refactoring of adaptivity calculation * [WIP] Refactor p2p communication in a separate function for global adaptivity * Add comments on send map and receive map dictionaries * [WIP] Simplifying association step in parent Adaptivity class * [WIP] Updating active sims and association functions cannot be generalized and need to be specific to local or global adaptivity * Getting local adaptivity tests to work * Pass only the global id while creating micro simulation objects * Fix call in micro_manager.py * Move compute_*_adaptivity functions into the respective adaptivity calculator classes * Repair tests * [WIP] Debugging global adaptivity * [WIP] Unit cube integration test now works with global adaptivity * Formatting * [WIP] Use deepcopy when copying one dict to another * Correct log output for number of active and inactive sims for global adaptivity * Use local IDs for logging active state * Simplifying the MicroSimulation class by removing information on adaptivity from the micro simulation objects * Use range() when iterating an int * Minor corrections * Fix bug in similarity distance calculation * [WIP] Restructure the way the similarity distance matrix is calculated * Changes to the unit test to make it work with global adaptivity * Fixing tests * Fix CPP dummy --- .github/workflows/run-adaptivity-test.yml | 10 +- .gitignore | 5 +- examples/cpp-dummy/micro_cpp_dummy.cpp | 43 +-- examples/cpp-dummy/micro_cpp_dummy.hpp | 6 +- examples/python-dummy/micro_dummy.py | 12 +- micro_manager/adaptivity/adaptivity.py | 119 +++++-- micro_manager/adaptivity/global_adaptivity.py | 279 ++++++++++++++++ micro_manager/adaptivity/local_adaptivity.py | 155 ++++----- micro_manager/micro_manager.py | 313 ++++++++++-------- micro_manager/micro_simulation.py | 77 +---- .../test_unit_cube_dummy/clean-test.sh | 1 + ...ger-config-global-adaptivity-parallel.json | 21 ++ ...icro-manager-config-global-adaptivity.json | 20 ++ ...icro-manager-config-local-adaptivity.json} | 3 - .../test_unit_cube_dummy/micro_dummy.py | 13 +- .../test_unit_cube_dummy/precice-config.xml | 6 +- .../test_unit_cube_dummy/unit_cube_macro.py | 28 +- ...-config.json => micro-manager-config.json} | 11 +- ...o-manager-unit-test-adaptivity-config.json | 21 -- tests/unit/test_adaptivity.py | 279 ++++++++++------ tests/unit/test_micro_manager.py | 29 +- 21 files changed, 925 insertions(+), 526 deletions(-) create mode 100644 micro_manager/adaptivity/global_adaptivity.py create mode 100644 tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity-parallel.json create mode 100644 tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity.json rename tests/integration/test_unit_cube_dummy/{micro-manager-config-adaptivity.json => micro-manager-config-local-adaptivity.json} (90%) rename tests/unit/{micro-manager-unit-test-config.json => micro-manager-config.json} (70%) delete mode 100644 tests/unit/micro-manager-unit-test-adaptivity-config.json diff --git 
a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml index ab17719f..d0f2b5dd 100644 --- a/.github/workflows/run-adaptivity-test.yml +++ b/.github/workflows/run-adaptivity-test.yml @@ -30,9 +30,15 @@ jobs: working-directory: micro-manager run: pip3 install --user . - - name: Run integration test + - name: Run integration test with local adaptivity + timeout-minutes: 3 working-directory: micro-manager/tests/integration/test_unit_cube_dummy - run: python3 unit_cube_macro.py & python3 run_micro_manager.py --config micro-manager-config-adaptivity.json + run: python3 unit_cube_macro.py & python3 run_micro_manager.py --config micro-manager-config-local-adaptivity.json + + - name: Run integration test serially with global adaptivity + timeout-minutes: 3 + working-directory: micro-manager/tests/integration/test_unit_cube_dummy + run: python3 unit_cube_macro.py & python3 run_micro_manager.py --config micro-manager-config-global-adaptivity.json adaptivity_unit_tests: name: Run adaptivity unit tests diff --git a/.gitignore b/.gitignore index ec86acfa..3251b572 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,7 @@ micro_manager.egg-info/ dist # vscode -.vscode \ No newline at end of file +.vscode + +# Tests output +*.log diff --git a/examples/cpp-dummy/micro_cpp_dummy.cpp b/examples/cpp-dummy/micro_cpp_dummy.cpp index 7681e1e6..17f782c6 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.cpp +++ b/examples/cpp-dummy/micro_cpp_dummy.cpp @@ -27,8 +27,6 @@ void MicroSimulation::initialize() // Solve py::dict MicroSimulation::solve(py::dict macro_data, double dt) { - std::cout << "Solve timestep of micro problem\n"; - //! Code below shows how to convert input macro data and use it in your C++ solver // Create a double from macro_data["micro_scalar_data"], which is a Python float @@ -55,31 +53,19 @@ py::dict MicroSimulation::solve(py::dict macro_data, double dt) return micro_write_data; } -// Save Checkpoint -- only valid for implicit coupling -void MicroSimulation::save_checkpoint() -{ - std::cout << "Saving state of micro problem\n"; - _checkpoint = _micro_scalar_data; -} - -// Reload Checkpoint -- only valid for implicit coupling -void MicroSimulation::reload_checkpoint() -{ - std::cout << "Reverting to old state of micro problem\n"; - _micro_scalar_data = _checkpoint; -} - // This function needs to set the complete state of a micro simulation -void MicroSimulation::setState(double micro_scalar_data, double checkpoint) +void MicroSimulation::set_state(py::list state) { - _micro_scalar_data = micro_scalar_data; - _checkpoint = checkpoint; + _micro_scalar_data = state[0].cast(); + _checkpoint = state[1].cast(); } // This function needs to return variables which can fully define the state of a micro simulation -py::tuple MicroSimulation::getState() const +py::list MicroSimulation::get_state() const { - return py::make_tuple(_micro_scalar_data, _checkpoint); + std::vector state{_micro_scalar_data, _checkpoint}; + py::list state_python = py::cast(state); + return state_python; } PYBIND11_MODULE(micro_dummy, m) { @@ -90,23 +76,20 @@ PYBIND11_MODULE(micro_dummy, m) { .def(py::init()) .def("initialize", &MicroSimulation::initialize) .def("solve", &MicroSimulation::solve) - .def("save_checkpoint", &MicroSimulation::save_checkpoint) - .def("reload_checkpoint", &MicroSimulation::reload_checkpoint) - .def("get_state", &MicroSimulation::getState) - .def("set_state", &MicroSimulation::setState) + .def("get_state", &MicroSimulation::get_state) + .def("set_state", 
&MicroSimulation::set_state) .def(py::pickle( [](const MicroSimulation &ms) { // __getstate__ - /* Return a tuple that fully encodes the state of the object */ - return ms.getState(); + return ms.get_state(); }, - [](py::tuple t) { // __setstate__ + [](py::list t) { // __setstate__ if (t.size() != 2) throw std::runtime_error("Invalid state!"); - + /* Create a new C++ instance */ MicroSimulation ms; - ms.setState(t[0].cast(), t[1].cast()); + ms.set_state(t); return ms; } diff --git a/examples/cpp-dummy/micro_cpp_dummy.hpp b/examples/cpp-dummy/micro_cpp_dummy.hpp index 063b57c6..9c0acf69 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.hpp +++ b/examples/cpp-dummy/micro_cpp_dummy.hpp @@ -18,12 +18,10 @@ class MicroSimulation void initialize(); // solve takes a python dict data, and the timestep dt as inputs, and returns a python dict py::dict solve(py::dict macro_write_data, double dt); - void save_checkpoint(); - void reload_checkpoint(); MicroSimulation __deepcopy__(py::dict memo); - void setState(double micro_scalar_data, double checkpoint); - py::tuple getState() const; + void set_state(py::list state); + py::list get_state() const; private: double _micro_scalar_data; diff --git a/examples/python-dummy/micro_dummy.py b/examples/python-dummy/micro_dummy.py index e1a2465e..a961717e 100644 --- a/examples/python-dummy/micro_dummy.py +++ b/examples/python-dummy/micro_dummy.py @@ -16,13 +16,11 @@ def __init__(self): self._checkpoint = None def initialize(self): - print("Initialize micro problem") self._micro_scalar_data = 0 self._micro_vector_data = [] self._checkpoint = 0 def solve(self, macro_data, dt): - print("Solve timestep of micro problem") assert dt != 0 self._micro_vector_data = [] self._micro_scalar_data = macro_data["macro-scalar-data"] + 1 @@ -32,10 +30,8 @@ def solve(self, macro_data, dt): return {"micro-scalar-data": self._micro_scalar_data.copy(), "micro-vector-data": self._micro_vector_data.copy()} - def save_checkpoint(self): - print("Saving state of micro problem") - self._checkpoint = self._micro_scalar_data + def set_state(self, state): + self._checkpoint = state - def reload_checkpoint(self): - print("Reverting to old state of micro problem") - self._micro_scalar_data = self._checkpoint + def get_state(self): + return self._checkpoint diff --git a/micro_manager/adaptivity/adaptivity.py b/micro_manager/adaptivity/adaptivity.py index 32eea7d6..d9e4edd9 100644 --- a/micro_manager/adaptivity/adaptivity.py +++ b/micro_manager/adaptivity/adaptivity.py @@ -1,34 +1,39 @@ """ Functionality for adaptive initialization and control of micro simulations """ +import sys import numpy as np +from math import exp from typing import Callable class AdaptivityCalculator: - def __init__(self, configurator, global_ids) -> None: + def __init__(self, configurator, logger) -> None: # Names of data to be used for adaptivity computation self._refine_const = configurator.get_adaptivity_refining_const() self._coarse_const = configurator.get_adaptivity_coarsening_const() + self._hist_param = configurator.get_adaptivity_hist_param() + self._adaptivity_data_names = configurator.get_data_for_adaptivity() self._adaptivity_type = configurator.get_adaptivity_type() + + self._logger = logger + self._coarse_tol = 0.0 self._ref_tol = 0.0 - # Use set to make the "in" functionality faster for large lists - self._global_ids_of_local_sims = global_ids self._similarity_measure = self._get_similarity_measure(configurator.get_adaptivity_similarity_measure()) - def get_similarity_dists(self, dt: float, similarity_dists: 
np.ndarray, data: np.ndarray) -> np.ndarray: + def _get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: dict) -> np.ndarray: """ Calculate metric which determines if two micro simulations are similar enough to have one of them deactivated. Parameters ---------- dt : float - Timestep + Current time step similarity_dists : numpy array 2D array having similarity distances between each micro simulation pair - data : numpy array + data : dict Data to be used in similarity distance calculation Returns @@ -38,22 +43,96 @@ def get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: np """ _similarity_dists = np.copy(similarity_dists) - if data.ndim == 1: - # If the adaptivity-data is a scalar for each simulation, - # expand the dimension to make it a 2D array to unify the calculation. - # The axis is later reduced with a norm. - data = np.expand_dims(data, axis=1) + data_diff = np.zeros_like(_similarity_dists) + for name in self._adaptivity_data_names: + data_vals = data[name] + if data_vals.ndim == 1: + # If the adaptivity-data is a scalar for each simulation, + # expand the dimension to make it a 2D array to unify the calculation. + # The axis is later reduced with a norm. + data_vals = np.expand_dims(data_vals, axis=1) + + data_diff += self._similarity_measure(data_vals) + + return exp(-self._hist_param * dt) * _similarity_dists + dt * data_diff + + def _update_active_sims( + self, + similarity_dists: np.ndarray, + is_sim_active: np.ndarray) -> np.ndarray: + """ + Update set of active micro simulations. Active micro simulations are compared to each other + and if found similar, one of them is deactivated. + + Parameters + ---------- + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + is_sim_active : numpy array + 1D array having state (active or inactive) of each micro simulation + + Returns + ------- + _is_sim_active : numpy array + Updated 1D array having state (active or inactive) of each micro simulation + """ + self._coarse_tol = self._coarse_const * self._refine_const * np.amax(similarity_dists) + + _is_sim_active = np.copy(is_sim_active) # Input is_sim_active is not longer used after this point + + # Update the set of active micro sims + for i in range(_is_sim_active.size): + if _is_sim_active[i]: # if sim is active + if self._check_for_deactivation(i, similarity_dists, _is_sim_active): + _is_sim_active[i] = False + + return _is_sim_active + + def _associate_inactive_to_active( + self, + similarity_dists: np.ndarray, + is_sim_active: np.ndarray, + sim_is_associated_to: np.ndarray) -> np.ndarray: + """ + Associate inactive micro simulations to most similar active micro simulation. + + Parameters + ---------- + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + is_sim_active : numpy array + 1D array having state (active or inactive) of each micro simulation + sim_is_associated_to : numpy array + 1D array with values of associated simulations of inactive simulations. Active simulations have None + + Returns + ------- + _sim_is_associated_to : numpy array + 1D array with values of associated simulations of inactive simulations. 
Active simulations have None + """ + active_ids = np.where(is_sim_active)[0] + inactive_ids = np.where(is_sim_active == False)[0] + + _sim_is_associated_to = np.copy(sim_is_associated_to) + + # Associate inactive micro sims to active micro sims + for inactive_id in inactive_ids: + dist_min = sys.float_info.max + for active_id in active_ids: + # Find most similar active sim for every inactive sim + if similarity_dists[inactive_id, active_id] < dist_min: + associated_active_id = active_id + dist_min = similarity_dists[inactive_id, active_id] - data_diff = self._similarity_measure(data) - _similarity_dists += dt * data_diff + _sim_is_associated_to[inactive_id] = associated_active_id - return _similarity_dists + return _sim_is_associated_to def _check_for_activation( self, inactive_id: int, similarity_dists: np.ndarray, - micro_sim_states: np.ndarray) -> bool: + is_sim_active: np.ndarray) -> bool: """ Function to check if an inactive simulation needs to be activated @@ -63,10 +142,10 @@ def _check_for_activation( ID of inactive simulation which is checked for activation similarity_dists : numpy array 2D array having similarity distances between each micro simulation pair - micro_sim_states : numpy array + is_sim_active : numpy array 1D array having state (active or inactive) of each micro simulation """ - active_sim_ids = np.where(micro_sim_states == 1)[0] + active_sim_ids = np.where(is_sim_active)[0] dists = similarity_dists[inactive_id, active_sim_ids] @@ -77,7 +156,7 @@ def _check_for_deactivation( self, active_id: int, similarity_dists: np.ndarray, - micro_sim_states: np.ndarray) -> bool: + is_sim_active: np.ndarray) -> bool: """ Function to check if an active simulation needs to be deactivated @@ -87,10 +166,10 @@ def _check_for_deactivation( ID of active simulation which is checked for deactivation similarity_dists : numpy array 2D array having similarity distances between each micro simulation pair - micro_sim_states : numpy array + is_sim_active : numpy array 1D array having state (active or inactive) of each micro simulation """ - active_sim_ids = np.where(micro_sim_states == 1)[0] + active_sim_ids = np.where(is_sim_active)[0] for active_id_2 in active_sim_ids: if active_id != active_id_2: # don't compare active sim to itself diff --git a/micro_manager/adaptivity/global_adaptivity.py b/micro_manager/adaptivity/global_adaptivity.py new file mode 100644 index 00000000..cc39a63a --- /dev/null +++ b/micro_manager/adaptivity/global_adaptivity.py @@ -0,0 +1,279 @@ +""" +Functionality for adaptive control of micro simulations in a global way (all-to-all comparison of micro simulations) +""" +import numpy as np +import hashlib +from copy import deepcopy +from mpi4py import MPI +from typing import Dict +from .adaptivity import AdaptivityCalculator + + +class GlobalAdaptivityCalculator(AdaptivityCalculator): + """ + This class provides functionality to compute adaptivity globally, i.e. by comparing micro simulation from all processes. + All ID variables used in the methods of this class are global IDs, unless they have *local* in their name. 
+ """ + + def __init__( + self, + configurator, + logger, + is_sim_on_this_rank: list, + rank_of_sim: np.ndarray, + global_ids: list, + comm, + rank: int) -> None: + super().__init__(configurator, logger) + self._is_sim_on_this_rank = is_sim_on_this_rank + self._rank_of_sim = rank_of_sim + self._global_ids = global_ids + self._comm = comm + self._rank = rank + + def compute_adaptivity( + self, + dt: float, + micro_sims: list, + similarity_dists_nm1: np.ndarray, + is_sim_active_nm1: np.ndarray, + sim_is_associated_to_nm1: np.ndarray, + data_for_adaptivity: dict) -> tuple: + """ + Compute adaptivity globally based on similarity distances and micro simulation states + + Parameters + ---------- + dt : float + TODO + micro_sims : list + List of objects of class MicroProblem, which are the micro simulations + similarity_dists_nm1 : numpy array + 2D array having similarity distances between each micro simulation pair + is_sim_active_nm1 : numpy array + 1D array having state (active or inactive) of each micro simulation on this rank + sim_is_associated_to_nm1 : numpy array + 1D array with values of associated simulations of inactive simulations. Active simulations have None + data_for_adaptivity : dict + Dictionary with keys as names of data to be used in the similarity calculation, and values as the respective data for the micro simulations + + Results + ------- + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + is_sim_active : numpy array + 1D array having state (active or inactive) of each micro simulation + """ + # Gather adaptivity data from all ranks + global_data_for_adaptivity = dict() + for name in self._adaptivity_data_names.keys(): + data_as_list = self._comm.allgather(data_for_adaptivity[name]) + global_data_for_adaptivity[name] = np.concatenate((data_as_list[:]), axis=0) + + # Similarity distance matrix is calculated globally on every rank + similarity_dists = self._get_similarity_dists(dt, similarity_dists_nm1, global_data_for_adaptivity) + + is_sim_active = self._update_active_sims(similarity_dists, is_sim_active_nm1) + + is_sim_active, sim_is_associated_to = self._update_inactive_sims( + similarity_dists, is_sim_active_nm1, sim_is_associated_to_nm1, micro_sims) + + sim_is_associated_to = self._associate_inactive_to_active( + similarity_dists, is_sim_active, sim_is_associated_to) + + self._logger.info( + "{} active simulations, {} inactive simulations".format( + np.count_nonzero( + is_sim_active[self._global_ids[0]:self._global_ids[-1] + 1]), + np.count_nonzero( + is_sim_active[self._global_ids[0]:self._global_ids[-1] + 1] == False))) + + return similarity_dists, is_sim_active, sim_is_associated_to + + def communicate_micro_output( + self, + is_sim_active: np.ndarray, + sim_is_associated_to: np.ndarray, + micro_output: list) -> None: + """ + Communicate micro output from active simulation to their associated inactive simulations. P2P communication is done. + + Parameters + ---------- + micro_sims : list + List of objects of class MicroProblem, which are the micro simulations + is_sim_active : numpy array + 1D array having state (active or inactive) of each micro simulation on this rank + sim_is_associated_to : numpy array + 1D array with values of associated simulations of inactive simulations. Active simulations have None + micro_output : list + List of dicts having individual output of each simulation. Only the active simulation outputs are entered. 
+ """ + inactive_local_ids = np.where(is_sim_active[self._global_ids[0]:self._global_ids[-1] + 1] == False)[0] + + local_sim_is_associated_to = sim_is_associated_to[self._global_ids[0]:self._global_ids[-1] + 1] + + # Keys are global IDs of active simulations associated to inactive + # simulations on this rank. Values are global IDs of the inactive + # simulations. + active_to_inactive_map: Dict[int, list] = dict() + + for i in inactive_local_ids: + assoc_active_id = local_sim_is_associated_to[i] + # Gather global IDs of associated active simulations not on this rank for communication + if not self._is_sim_on_this_rank[assoc_active_id]: + if assoc_active_id in active_to_inactive_map: + active_to_inactive_map[assoc_active_id].append(i) + else: + active_to_inactive_map[assoc_active_id] = [i] + else: # If associated active simulation is on this rank, copy the output directly + micro_output[i] = deepcopy(micro_output[self._global_ids.index(assoc_active_id)]) + + assoc_active_ids = list(active_to_inactive_map.keys()) + + recv_reqs = self._p2p_comm(assoc_active_ids, micro_output) + + # Add received output of active sims to inactive sims on this rank + for count, req in enumerate(recv_reqs): + output = req.wait() + for local_id in active_to_inactive_map[assoc_active_ids[count]]: + micro_output[local_id] = deepcopy(output) + + def _update_inactive_sims( + self, + similarity_dists: np.ndarray, + is_sim_active: np.ndarray, + sim_is_associated_to: np.ndarray, + micro_sims: list) -> tuple: + """ + Update set of inactive micro simulations. Each inactive micro simulation is compared to all active ones + and if it is not similar to any of them, it is activated. + + Parameters + ---------- + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + is_sim_active : numpy array + 1D array having state (active or inactive) of each micro simulation + sim_is_associated_to : numpy array + 1D array with values of associated simulations of inactive simulations. Active simulations have None + micro_sims : list + List of objects of class MicroProblem, which are the micro simulations + + Returns + ------- + _is_sim_active : numpy array + Updated 1D array having state (active or inactive) of each micro simulation + _sim_is_associated_to : numpy array + 1D array with values of associated simulations of inactive simulations. 
Active simulations have None + """ + self._ref_tol = self._refine_const * np.amax(similarity_dists) + + _is_sim_active = np.copy(is_sim_active) # Input is_sim_active is not longer used after this point + _sim_is_associated_to = np.copy(sim_is_associated_to) + + # Check inactive simulations for activation and collect IDs of those to be activated + to_be_activated_ids = [] # Global IDs to be activated + for i in range(_is_sim_active.size): + if not _is_sim_active[i]: # if id is inactive + if self._check_for_activation(i, similarity_dists, _is_sim_active): + _is_sim_active[i] = True + if self._is_sim_on_this_rank[i]: + to_be_activated_ids.append(i) + + print("is_sim_active: {}, to_be_activated: {}".format(_is_sim_active, to_be_activated_ids)) + + local_sim_is_associated_to = _sim_is_associated_to[self._global_ids[0]:self._global_ids[-1] + 1] + + # Keys are global IDs of active sims not on this rank, values are lists of local and + # global IDs of inactive sims associated to the active sims which are on this rank + to_be_activated_map: Dict[int, list] = dict() + + for i in to_be_activated_ids: + # Only handle activation of simulations on this rank -- LOCAL SCOPE HERE ON + if self._is_sim_on_this_rank[i]: + to_be_activated_local_id = self._global_ids.index(i) + assoc_active_id = local_sim_is_associated_to[to_be_activated_local_id] + + if self._is_sim_on_this_rank[assoc_active_id]: # Associated active simulation is on the same rank + assoc_active_local_id = self._global_ids.index(assoc_active_id) + micro_sims[to_be_activated_local_id].set_state(micro_sims[assoc_active_local_id].get_state()) + _sim_is_associated_to[i] = -2 # Active sim cannot have an associated sim + else: # Associated active simulation is not on this rank + if assoc_active_id in to_be_activated_map: + to_be_activated_map[assoc_active_id].append(to_be_activated_local_id) + else: + to_be_activated_map[assoc_active_id] = [to_be_activated_local_id] + + sim_states_and_global_ids = [] + for sim in micro_sims: + sim_states_and_global_ids.append((sim.get_state(), sim.get_global_id())) + + recv_reqs = self._p2p_comm(list(to_be_activated_map.keys()), sim_states_and_global_ids) + + # Use received micro sims to activate the required simulations + for req in recv_reqs: + state, global_id = req.wait() + local_ids = to_be_activated_map[global_id] + for local_id in local_ids: + micro_sims[local_id].set_state(state) + _sim_is_associated_to[self._global_ids[local_id]] = -2 # Active sim cannot have an associated sim + + return _is_sim_active, _sim_is_associated_to + + def _create_tag(self, sim_id, src_rank, dest_rank): + send_hashtag = hashlib.sha256() + send_hashtag.update((str(src_rank) + str(sim_id) + str(dest_rank)).encode('utf-8')) + tag = int(send_hashtag.hexdigest()[:6], base=16) + return tag + + def _p2p_comm(self, assoc_active_ids: list, data: list) -> list: + """ + This function created sending and receiving maps for p2p communication. 
+ + Parameters + ---------- + assoc_active_ids : list + Global IDs of active simulations which are not on this rank and are associated to the inactive simulations on this rank + """ + send_map_local: Dict[int, int] = dict() # keys are global IDs, values are rank to send to + send_map: Dict[int, list] = dict() # keys are global IDs of sims to send, values are ranks to send the sims to + recv_map: Dict[int, int] = dict() # keys are global IDs to receive, values are ranks to receive from + + for i in assoc_active_ids: + # Add simulation and its rank to receive map + recv_map[i] = self._rank_of_sim[i] + # Add simulation and this rank to local sending map + send_map_local[i] = self._rank + + # Gather information about which sims to send where, from the sending perspective + send_map_list = self._comm.allgather(send_map_local) + + for d in send_map_list: + for i, rank in d.items(): + if self._is_sim_on_this_rank[i]: + if i in send_map: + send_map[i].append(rank) + else: + send_map[i] = [rank] + + # Asynchronous send operations + send_reqs = [] + for global_id, send_ranks in send_map.items(): + local_id = self._global_ids.index(global_id) + for send_rank in send_ranks: + tag = self._create_tag(global_id, self._rank, send_rank) + req = self._comm.isend(data[local_id], dest=send_rank, tag=tag) + send_reqs.append(req) + + # Asynchronous receive operations + recv_reqs = [] + for global_id, recv_rank in recv_map.items(): + tag = self._create_tag(global_id, recv_rank, self._rank) + req = self._comm.irecv(source=recv_rank, tag=tag) + recv_reqs.append(req) + + # Wait for all non-blocking communication to complete + MPI.Request.Waitall(send_reqs) + + return recv_reqs diff --git a/micro_manager/adaptivity/local_adaptivity.py b/micro_manager/adaptivity/local_adaptivity.py index 551da16d..e38baa67 100644 --- a/micro_manager/adaptivity/local_adaptivity.py +++ b/micro_manager/adaptivity/local_adaptivity.py @@ -1,56 +1,70 @@ """ -Functionality for adaptive initialization and control of micro simulations locally within a rank (or the entire domain if the Micro Manager is run in serial) +Functionality for adaptive control of micro simulations locally within a rank (or the entire domain if the Micro Manager is run in serial) """ -import sys import numpy as np -from copy import deepcopy from .adaptivity import AdaptivityCalculator class LocalAdaptivityCalculator(AdaptivityCalculator): - def __init__(self, configurator, global_ids, number_of_local_sims) -> None: - super().__init__(configurator, global_ids) - self._number_of_local_sims = number_of_local_sims + def __init__(self, configurator, logger) -> None: + super().__init__(configurator, logger) - def update_active_micro_sims( + def compute_adaptivity( self, - similarity_dists: np.ndarray, - micro_sim_states: np.ndarray, - micro_sims: list) -> np.ndarray: + dt, + micro_sims, + similarity_dists_nm1: np.ndarray, + is_sim_active_nm1: np.ndarray, + sim_is_associated_to_nm1: np.ndarray, + data_for_adaptivity: dict): """ - Update set of active micro simulations. Active micro simulations are compared to each other - and if found similar, one of them is deactivated. 
+ Compute adaptivity locally (within a rank) based on similarity distances and micro simulation states + Parameters ---------- - similarity_dists : numpy array - 2D array having similarity distances between each micro simulation pair - micro_sim_states : numpy array - 1D array having state (active or inactive) of each micro simulation + dt : float + Current time step micro_sims : list - List of objects of class MicroProblem, which are the micro simulations - Returns + TODO + similarity_dists_nm1 : numpy array + 2D array having similarity distances between each micro simulation pair + is_sim_active_nm1 : numpy array + 1D array having True if sim is active, False if sim is inactive + sim_is_associated_to_nm1 : numpy array + 1D array with values of associated simulations of inactive simulations. Active simulations have None + data_for_adaptivity : dict + TODO + + Results ------- - _micro_sim_states : numpy array - Updated 1D array having state (active or inactive) of each micro simulation + similarity_dists : numpy array + 2D array having similarity distances between each micro simulation pair + is_sim_active : numpy array + 1D array, True is sim is active, False if sim is inactive """ - self._coarse_tol = self._coarse_const * self._refine_const * np.amax(similarity_dists) + similarity_dists = self._get_similarity_dists(dt, similarity_dists_nm1, data_for_adaptivity) + + # Operation done globally if global adaptivity is chosen + is_sim_active = self._update_active_sims(similarity_dists, is_sim_active_nm1) + + is_sim_active, sim_is_associated_to = self._update_inactive_sims( + similarity_dists, is_sim_active_nm1, sim_is_associated_to_nm1, micro_sims) - _micro_sim_states = np.copy(micro_sim_states) # Input micro_sim_states is not longer used after this point + sim_is_associated_to = self._associate_inactive_to_active( + similarity_dists, is_sim_active, sim_is_associated_to) - # Update the set of active micro sims - for i in range(self._number_of_local_sims): - if _micro_sim_states[i]: # if sim is active - if self._check_for_deactivation(i, similarity_dists, _micro_sim_states): - micro_sims[i].deactivate() - _micro_sim_states[i] = 0 + self._logger.info( + "{} active simulations, {} inactive simulations".format( + np.count_nonzero(is_sim_active), np.count_nonzero(is_sim_active == False))) - return _micro_sim_states + return similarity_dists, is_sim_active, sim_is_associated_to - def update_inactive_micro_sims( + def _update_inactive_sims( self, similarity_dists: np.ndarray, - micro_sim_states: np.ndarray, - micro_sims: list) -> np.ndarray: + is_sim_active: np.ndarray, + sim_is_associated_to: np.ndarray, + micro_sims: list) -> tuple: """ Update set of inactive micro simulations. Each inactive micro simulation is compared to all active ones and if it is not similar to any of them, it is activated. @@ -58,71 +72,32 @@ def update_inactive_micro_sims( ---------- similarity_dists : numpy array 2D array having similarity distances between each micro simulation pair - micro_sim_states : numpy array + is_sim_active : numpy array 1D array having state (active or inactive) of each micro simulation + sim_is_associated_to : numpy array + 1D array with values of associated simulations of inactive simulations. 
Active simulations have None micro_sims : list - List of objects of class MicroProblem, which are the micro simulations + TODO + Returns ------- - _micro_sim_states : numpy array + _is_sim_active : numpy array Updated 1D array having state (active or inactive) of each micro simulation + _sim_is_associated_to : numpy array + 1D array with values of associated simulations of inactive simulations. Active simulations have None """ self._ref_tol = self._refine_const * np.amax(similarity_dists) - _micro_sim_states = np.copy(micro_sim_states) # Input micro_sim_states is not longer used after this point + _is_sim_active = np.copy(is_sim_active) # Input is_sim_active is not longer used after this point + _sim_is_associated_to = np.copy(sim_is_associated_to) # Update the set of inactive micro sims - for i in range(self._number_of_local_sims): - if not _micro_sim_states[i]: # if id is inactive - if self._check_for_activation(i, similarity_dists, _micro_sim_states): - associated_active_local_id = micro_sims[i].get_associated_active_local_id() - - # Get local and global ID of inactive simulation, to set it to the copied simulation later - local_id = micro_sims[i].get_local_id() - global_id = micro_sims[i].get_global_id() - - # Copy state from associated active simulation with get_state and - # set_state if available else deepcopy - if hasattr(micro_sims[associated_active_local_id], 'get_state') and \ - hasattr(micro_sims[associated_active_local_id], 'set_state'): - micro_sims[i].set_state(*micro_sims[associated_active_local_id].get_state()) - else: - micro_sims[i] = None - micro_sims[i] = deepcopy(micro_sims[associated_active_local_id]) - micro_sims[i].set_local_id(local_id) - micro_sims[i].set_global_id(global_id) - _micro_sim_states[i] = 1 - - return _micro_sim_states - - def associate_inactive_to_active( - self, - similarity_dists: np.ndarray, - micro_sim_states: np.ndarray, - micro_sims: list) -> list: - """ - Associate inactive micro simulations to most similar active micro simulation. 
- - Parameters - ---------- - similarity_dists : numpy array - 2D array having similarity distances between each micro simulation pair - micro_sim_states : numpy array - 1D array having state (active or inactive) of each micro simulation - micro_sims : list - List of objects of class MicroProblem, which are the micro simulations - """ - active_sim_ids = np.where(micro_sim_states == 1)[0] - inactive_sim_ids = np.where(micro_sim_states == 0)[0] - - # Associate inactive micro sims to active micro sims - for inactive_id in inactive_sim_ids: - dist_min = sys.float_info.max - for active_id in active_sim_ids: - # Find most similar active sim for every inactive sim - if similarity_dists[inactive_id, active_id] < dist_min: - associated_active_id = active_id - dist_min = similarity_dists[inactive_id, active_id] - - micro_sims[inactive_id].is_associated_to_active_sim( - associated_active_id, self._global_ids_of_local_sims[associated_active_id]) + for i in range(_is_sim_active.size): + if not _is_sim_active[i]: # if id is inactive + if self._check_for_activation(i, similarity_dists, _is_sim_active): + associated_active_local_id = _sim_is_associated_to[i] + micro_sims[i].set_state(micro_sims[associated_active_local_id].get_state()) + _is_sim_active[i] = True + _sim_is_associated_to[i] = -2 # Active sim cannot have an associated sim + + return _is_sim_active, _sim_is_associated_to diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 74465068..cb6fea63 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -8,14 +8,16 @@ import sys import precice from mpi4py import MPI -from math import exp import numpy as np import logging import time +from copy import deepcopy +from typing import Dict from .config import Config from .micro_simulation import create_micro_problem_class from .adaptivity.local_adaptivity import LocalAdaptivityCalculator +from .adaptivity.global_adaptivity import GlobalAdaptivityCalculator from .domain_decomposition import DomainDecomposer sys.path.append(os.getcwd()) @@ -60,9 +62,6 @@ def __init__(self, config_file: str) -> None: self._rank, self._size) - micro_file_name = self._config.get_micro_file_name() - self._micro_problem = getattr(__import__(micro_file_name, fromlist=["MicroSimulation"]), "MicroSimulation") - self._macro_mesh_id = self._interface.get_mesh_id(self._config.get_macro_mesh_name()) # Data names and ids of data written to preCICE @@ -84,7 +83,7 @@ def __init__(self, config_file: str) -> None: self._is_micro_solve_time_required = self._config.write_micro_solve_time() - self._local_number_of_micro_sims = None + self._local_number_of_sims = None self._global_number_of_micro_sims = None self._is_rank_empty = False self._dt = None @@ -96,10 +95,9 @@ def __init__(self, config_file: str) -> None: if self._is_adaptivity_on: self._number_of_micro_sims_for_adaptivity = 0 - self._data_for_similarity_calc = dict() + self._data_for_adaptivity = dict() self._adaptivity_type = self._config.get_adaptivity_type() - self._hist_param = self._config.get_adaptivity_hist_param() self._adaptivity_data_names = self._config.get_data_for_adaptivity() # Names of macro data to be used for adaptivity computation @@ -140,10 +138,10 @@ def initialize(self) -> None: self._dt = self._interface.initialize() self._mesh_vertex_ids, mesh_vertex_coords = self._interface.get_mesh_vertices_and_ids(self._macro_mesh_id) - self._local_number_of_micro_sims, _ = mesh_vertex_coords.shape - self._logger.info("Number of local micro simulations = 
{}".format(self._local_number_of_micro_sims)) + self._local_number_of_sims, _ = mesh_vertex_coords.shape + self._logger.info("Number of local micro simulations = {}".format(self._local_number_of_sims)) - if self._local_number_of_micro_sims == 0: + if self._local_number_of_sims == 0: if self._is_parallel: self._logger.info( "Rank {} has no micro simulations and hence will not do any computation.".format( @@ -156,79 +154,99 @@ def initialize(self) -> None: # Gather number of micro simulations that each rank has, because this rank needs to know how many micro # simulations have been created by previous ranks, so that it can set # the correct global IDs - self._comm.Allgather(np.array(self._local_number_of_micro_sims), nms_all_ranks) + self._comm.Allgather(np.array(self._local_number_of_sims), nms_all_ranks) # Get global number of micro simulations self._global_number_of_micro_sims = np.sum(nms_all_ranks) if self._is_adaptivity_on: - if self._adaptivity_type == "local": # Currently only local variant, global variant to follow - self._number_of_micro_sims_for_adaptivity = self._local_number_of_micro_sims - for name, is_data_vector in self._adaptivity_data_names.items(): if is_data_vector: - self._data_for_similarity_calc[name] = np.zeros( - (self._local_number_of_micro_sims, self._interface.get_dimensions())) + self._data_for_adaptivity[name] = np.zeros( + (self._local_number_of_sims, self._interface.get_dimensions())) else: - self._data_for_similarity_calc[name] = np.zeros((self._local_number_of_micro_sims)) + self._data_for_adaptivity[name] = np.zeros((self._local_number_of_sims)) # Create lists of local and global IDs sim_id = np.sum(nms_all_ranks[:self._rank]) self._global_ids_of_local_sims = [] # DECLARATION - for i in range(self._local_number_of_micro_sims): + for i in range(self._local_number_of_sims): self._global_ids_of_local_sims.append(sim_id) sim_id += 1 + self._micro_sims = [None] * self._local_number_of_sims # DECLARATION + + micro_problem = getattr( + __import__( + self._config.get_micro_file_name(), + fromlist=["MicroSimulation"]), + "MicroSimulation") + if self._is_adaptivity_on: - self._micro_sims = [None] * self._number_of_micro_sims_for_adaptivity # DECLARATION + # Create micro simulation objects + for i in range(self._local_number_of_sims): + self._micro_sims[i] = create_micro_problem_class( + micro_problem)(self._global_ids_of_local_sims[i]) + + # Create a map of micro simulation global IDs and the ranks on which they are + micro_sims_on_this_rank = np.zeros(self._local_number_of_sims, dtype=np.intc) + for i in range(self._local_number_of_sims): + micro_sims_on_this_rank[i] = self._rank + + self._rank_of_sim = np.zeros(self._global_number_of_micro_sims, dtype=np.intc) # DECLARATION + self._comm.Allgather(micro_sims_on_this_rank, self._rank_of_sim) + + self._is_sim_on_this_rank = [False] * self._global_number_of_micro_sims # DECLARATION + for i in range(self._global_number_of_micro_sims): + if self._rank_of_sim[i] == self._rank: + self._is_sim_on_this_rank[i] = True + if self._adaptivity_type == "local": self._adaptivity_controller = LocalAdaptivityCalculator( - self._config, self._global_ids_of_local_sims, self._local_number_of_micro_sims) - # If adaptivity is calculated locally, IDs to iterate over are local - for i in range(self._local_number_of_micro_sims): - self._micro_sims[i] = create_micro_problem_class( - self._micro_problem)(i, self._global_ids_of_local_sims[i]) - - micro_sim_is_on_rank = np.zeros(self._local_number_of_micro_sims) - for i in 
range(self._local_number_of_micro_sims): - micro_sim_is_on_rank[i] = self._rank - - self._micro_sim_is_on_rank = np.zeros(self._global_number_of_micro_sims) # DECLARATION - self._comm.Allgather(micro_sim_is_on_rank, self._micro_sim_is_on_rank) + self._config, self._logger) + self._number_of_micro_sims_for_adaptivity = self._local_number_of_sims + elif self._adaptivity_type == "global": + self._adaptivity_controller = GlobalAdaptivityCalculator( + self._config, + self._logger, + self._is_sim_on_this_rank, + self._rank_of_sim, + self._global_ids_of_local_sims, + self._comm, + self._rank) + self._number_of_micro_sims_for_adaptivity = self._global_number_of_micro_sims + + self._micro_sims_active_steps = np.zeros(self._local_number_of_sims) else: - self._micro_sims = [] # DECLARATION - for i in range(self._local_number_of_micro_sims): - self._micro_sims.append( - create_micro_problem_class( - self._micro_problem)( - i, self._global_ids_of_local_sims[i])) + for i in range(self._local_number_of_sims): + self._micro_sims[i] = ( + create_micro_problem_class(micro_problem)(self._global_ids_of_local_sims[i])) - micro_sims_output = list(range(self._local_number_of_micro_sims)) - self._micro_sims_active_steps = np.zeros(self._local_number_of_micro_sims) + micro_sims_output = list(range(self._local_number_of_sims)) # Initialize micro simulations if initialize() method exists - if hasattr(self._micro_problem, 'initialize') and callable(getattr(self._micro_problem, 'initialize')): - for counter, i in enumerate(range(self._local_number_of_micro_sims)): - micro_sims_output[counter] = self._micro_sims[i].initialize() - if micro_sims_output[counter] is not None: + if hasattr(micro_problem, 'initialize') and callable(getattr(micro_problem, 'initialize')): + for i in range(self._local_number_of_sims): + micro_sims_output[i] = self._micro_sims[i].initialize() + if micro_sims_output[i] is not None: if self._is_micro_solve_time_required: - micro_sims_output[counter]["micro_sim_time"] = 0.0 + micro_sims_output[i]["micro_sim_time"] = 0.0 if self._is_adaptivity_on: - micro_sims_output[counter]["active_state"] = 0 - micro_sims_output[counter]["active_steps"] = 0 + micro_sims_output[i]["active_state"] = 0 + micro_sims_output[i]["active_steps"] = 0 else: - micro_sims_output[counter] = dict() + micro_sims_output[i] = dict() for name, is_data_vector in self._write_data_names.items(): if is_data_vector: - micro_sims_output[counter][name] = np.zeros(self._interface.get_dimensions()) + micro_sims_output[i][name] = np.zeros(self._interface.get_dimensions()) else: - micro_sims_output[counter][name] = 0.0 + micro_sims_output[i][name] = 0.0 self._logger.info("Micro simulations with global IDs {} - {} initialized.".format( self._global_ids_of_local_sims[0], self._global_ids_of_local_sims[-1])) self._micro_sims_have_output = False - if hasattr(self._micro_problem, 'output') and callable(getattr(self._micro_problem, 'output')): + if hasattr(micro_problem, 'output') and callable(getattr(micro_problem, 'output')): self._micro_sims_have_output = True # Write initial data if required @@ -248,25 +266,28 @@ def read_data_from_precice(self) -> list: local_read_data : list List of dicts in which keys are names of data being read and the values are the data from preCICE. 
""" - read_data = dict() + read_data: Dict[str, list] = dict() for name in self._read_data_names.keys(): read_data[name] = [] + print("read_data 1: {}".format(read_data)) + for name, is_data_vector in self._read_data_names.items(): if is_data_vector: read_data.update({name: self._interface.read_block_vector_data( self._read_data_ids[name], self._mesh_vertex_ids)}) + print("After scalar data update: {}".format(read_data)) else: read_data.update({name: self._interface.read_block_scalar_data( self._read_data_ids[name], self._mesh_vertex_ids)}) if self._is_adaptivity_on: if name in self._adaptivity_macro_data_names: - self._data_for_similarity_calc[name] = read_data[name] + self._data_for_adaptivity[name] = read_data[name] - read_data = [dict(zip(read_data, t)) for t in zip(*read_data.values())] + print("read_data 2: {}".format(read_data)) - return read_data + return [dict(zip(read_data, t)) for t in zip(*read_data.values())] def write_data_to_precice(self, micro_sims_output: list) -> None: """ @@ -277,7 +298,7 @@ def write_data_to_precice(self, micro_sims_output: list) -> None: micro_sims_output : list List of dicts in which keys are names of data and the values are the data to be written to preCICE. """ - write_data = dict() + write_data: Dict[str, list] = dict() if not self._is_rank_empty: for name in micro_sims_output[0]: write_data[name] = [] @@ -302,54 +323,40 @@ def write_data_to_precice(self, micro_sims_output: list) -> None: self._interface.write_block_scalar_data( self._write_data_ids[dname], [], np.array([])) - def compute_adaptivity(self, similarity_dists_nm1: np.ndarray, micro_sim_states_nm1: np.ndarray): + def solve_micro_simulations(self, micro_sims_input: list) -> list: """ - Compute adaptivity locally based on similarity distances and micro simulation states from t_{n-1} + Solve all micro simulations using the data read from preCICE and assemble the micro simulations outputs in a list of dicts + format. Parameters ---------- + micro_sims_input : list + List of dicts in which keys are names of data and the values are the data which are required inputs to + solve a micro simulation. - similarity_dists_nm1 : numpy array - 2D array having similarity distances between each micro simulation pair at t_{n-1} - micro_sim_states_nm1 : numpy array - 1D array having state (active or inactive) of each micro simulation at t_{n-1} on this rank - - Results + Returns ------- - similarity_dists : numpy array - 2D array having similarity distances between each micro simulation pair at t_{n} - micro_sim_states : numpy array - 1D array having state (active or inactive) of each micro simulation at t_{n} + micro_sims_output : list + List of dicts in which keys are names of data and the values are the data of the output of the micro + simulations. 
""" - # Multiply old similarity distance by history term to get current distances - similarity_dists_n = exp(-self._hist_param * self._dt) * similarity_dists_nm1 + micro_sims_output = [None] * self._local_number_of_sims - for name, _ in self._adaptivity_data_names.items(): - # For global adaptivity, similarity distance matrix is calculated globally on every rank - similarity_dists_n = self._adaptivity_controller.get_similarity_dists( - self._dt, similarity_dists_n, self._data_for_similarity_calc[name]) - - micro_sim_states_n = self._adaptivity_controller.update_active_micro_sims( - similarity_dists_n, micro_sim_states_nm1, self._micro_sims) - - micro_sim_states_n = self._adaptivity_controller.update_inactive_micro_sims( - similarity_dists_n, micro_sim_states_nm1, self._micro_sims) - - self._adaptivity_controller.associate_inactive_to_active( - similarity_dists_n, micro_sim_states_n, self._micro_sims) + for count, sim in enumerate(self._micro_sims): + start_time = time.time() + micro_sims_output[count] = sim.solve(micro_sims_input[count], self._dt) + end_time = time.time() - self._logger.info( - "Number of active micro simulations = {}".format( - np.count_nonzero( - micro_sim_states_n == 1))) - self._logger.info( - "Number of inactive micro simulations = {}".format( - np.count_nonzero( - micro_sim_states_n == 0))) + if self._is_micro_solve_time_required: + micro_sims_output[count]["micro_sim_time"] = end_time - start_time - return similarity_dists_n, micro_sim_states_n + return micro_sims_output - def solve_micro_simulations(self, micro_sims_input: list, micro_sim_states: np.ndarray) -> list: + def solve_micro_simulations_with_adaptivity( + self, + micro_sims_input: list, + is_sim_active: np.ndarray, + sim_is_associated_to: np.ndarray) -> list: """ Solve all micro simulations using the data read from preCICE and assemble the micro simulations outputs in a list of dicts format. @@ -359,8 +366,10 @@ def solve_micro_simulations(self, micro_sims_input: list, micro_sim_states: np.n micro_sims_input : list List of dicts in which keys are names of data and the values are the data which are required inputs to solve a micro simulation. - micro_sim_states : numpy array + is_sim_active : numpy array 1D array having state (active or inactive) of each micro simulation + sim_is_associated_to : numpy array + 1D array with values of associated simulations of inactive simulations. Active simulations have None Returns ------- @@ -368,49 +377,51 @@ def solve_micro_simulations(self, micro_sims_input: list, micro_sim_states: np.n List of dicts in which keys are names of data and the values are the data of the output of the micro simulations. 
""" - active_sim_ids = np.where(micro_sim_states == 1)[0] - inactive_sim_ids = np.where(micro_sim_states == 0)[0] + if self._adaptivity_type == "global": + active_sim_ids = np.where( + is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1])[0] + inactive_sim_ids = np.where( + is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1] == False)[0] + elif self._adaptivity_type == "local": + active_sim_ids = np.where(is_sim_active)[0] + inactive_sim_ids = np.where(is_sim_active == False)[0] - micro_sims_output = list(range(self._local_number_of_micro_sims)) + micro_sims_output = [None] * self._local_number_of_sims # Solve all active micro simulations for active_id in active_sim_ids: - # self._logger.info("Solving active micro sim [{}]".format(self._micro_sims[active_id].get_global_id())) - start_time = time.time() micro_sims_output[active_id] = self._micro_sims[active_id].solve(micro_sims_input[active_id], self._dt) end_time = time.time() - if self._is_adaptivity_on: - # Mark the micro sim as active for export - micro_sims_output[active_id]["active_state"] = 1 - micro_sims_output[active_id]["active_steps"] = self._micro_sims_active_steps[active_id] - - for name in self._adaptivity_micro_data_names: - # Collect micro sim output for adaptivity - self._data_for_similarity_calc[name][active_id] = micro_sims_output[active_id][name] + # Mark the micro sim as active for export + micro_sims_output[active_id]["active_state"] = 1 + micro_sims_output[active_id]["active_steps"] = self._micro_sims_active_steps[active_id] if self._is_micro_solve_time_required: micro_sims_output[active_id]["micro_sim_time"] = end_time - start_time # For each inactive simulation, copy data from most similar active simulation + if self._adaptivity_type == "global": + self._adaptivity_controller.communicate_micro_output(is_sim_active, sim_is_associated_to, micro_sims_output) + elif self._adaptivity_type == "local": + for inactive_id in inactive_sim_ids: + micro_sims_output[inactive_id] = deepcopy( + micro_sims_output[sim_is_associated_to[inactive_id]]) + + # Resolve micro sim output data for inactive simulations for inactive_id in inactive_sim_ids: - micro_sims_output[inactive_id] = dict() - for dname, values in micro_sims_output[self._micro_sims[inactive_id].get_associated_active_local_id()].items( - ): - micro_sims_output[inactive_id][dname] = values - - if self._is_adaptivity_on: - for name in self._adaptivity_micro_data_names: - # Collect micro sim output for adaptivity - self._data_for_similarity_calc[name][inactive_id] = micro_sims_output[inactive_id][name] - - micro_sims_output[inactive_id]["active_state"] = 0 - micro_sims_output[inactive_id]["active_steps"] = self._micro_sims_active_steps[inactive_id] + micro_sims_output[inactive_id]["active_state"] = 0 + micro_sims_output[inactive_id]["active_steps"] = self._micro_sims_active_steps[inactive_id] if self._is_micro_solve_time_required: micro_sims_output[inactive_id]["micro_sim_time"] = 0 + # Collect micro sim output for adaptivity + for i in range(self._local_number_of_sims): + for name in self._adaptivity_micro_data_names: + self._data_for_adaptivity[name][i] = micro_sims_output[i][name] + return micro_sims_output def solve(self): @@ -420,48 +431,49 @@ def solve(self): t, n = 0, 0 t_checkpoint, n_checkpoint = 0, 0 - micro_sim_states = np.ones((self._local_number_of_micro_sims)) # By default all sims are active - if self._is_adaptivity_on: similarity_dists = np.zeros( 
(self._number_of_micro_sims_for_adaptivity, self._number_of_micro_sims_for_adaptivity)) + # Start adaptivity calculation with all sims inactive - micro_sim_states = np.zeros((self._number_of_micro_sims_for_adaptivity)) + is_sim_active = np.array([False] * self._number_of_micro_sims_for_adaptivity) - # If all sims are inactive, activate the first one (a random choice) - self._micro_sims[0].activate() - micro_sim_states[0] = 1 + # Activate the first one (a random choice) + is_sim_active[0] = True - # All inactive sims are associated to the one active sim - for i in range(1, self._number_of_micro_sims_for_adaptivity): - self._micro_sims[i].is_associated_to_active_sim(0, self._global_ids_of_local_sims[0]) - self._micro_sims[0].is_associated_to_inactive_sims(range( - 1, self._number_of_micro_sims_for_adaptivity), self._global_ids_of_local_sims[1:self._local_number_of_micro_sims - 1]) + # Associate all sims to the one active sim + sim_is_associated_to = np.zeros((self._number_of_micro_sims_for_adaptivity), dtype=np.intc) + sim_is_associated_to[0] = -2 # An active sim does not have an associated sim similarity_dists_cp = None - micro_sim_states_cp = None - micro_sims_cp = None + is_sim_active_cp = None + sim_is_associated_to_cp = None + sim_states_cp = [None] * self._local_number_of_sims while self._interface.is_coupling_ongoing(): if self._interface.is_action_required(precice.action_write_iteration_checkpoint()): - for micro_sim in self._micro_sims: - micro_sim.save_checkpoint() + for i in range(self._local_number_of_sims): + sim_states_cp[i] = self._micro_sims[i].get_state() t_checkpoint = t n_checkpoint = n if self._is_adaptivity_on: if not self._is_adaptivity_required_in_every_implicit_iteration: - if self._adaptivity_type == "local": - similarity_dists, micro_sim_states = self.compute_adaptivity( - similarity_dists, micro_sim_states) + similarity_dists, is_sim_active, sim_is_associated_to = self._adaptivity_controller.compute_adaptivity( + self._dt, self._micro_sims, similarity_dists, is_sim_active, sim_is_associated_to, self._data_for_adaptivity) # Only do checkpointing if adaptivity is computed once in every time window similarity_dists_cp = np.copy(similarity_dists) - micro_sim_states_cp = np.copy(micro_sim_states) - micro_sims_cp = self._micro_sims.copy() + is_sim_active_cp = np.copy(is_sim_active) + sim_is_associated_to_cp = np.copy(sim_is_associated_to) + + if self._adaptivity_type == "local": + active_sim_ids = np.where(is_sim_active)[0] + elif self._adaptivity_type == "global": + active_sim_ids = np.where( + is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1])[0] - active_sim_ids = np.where(micro_sim_states == 1)[0] for active_id in active_sim_ids: self._micro_sims_active_steps[active_id] += 1 @@ -472,13 +484,22 @@ def solve(self): if self._is_adaptivity_on: if self._is_adaptivity_required_in_every_implicit_iteration: - similarity_dists, micro_sim_states = self.compute_adaptivity(similarity_dists, micro_sim_states) + similarity_dists, is_sim_active, sim_is_associated_to = self._adaptivity_controller.compute_adaptivity( + self._dt, self._micro_sims, similarity_dists, is_sim_active, sim_is_associated_to, self._data_for_adaptivity) + + if self._adaptivity_type == "local": + active_sim_ids = np.where(is_sim_active)[0] + elif self._adaptivity_type == "global": + active_sim_ids = np.where( + is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1])[0] - active_sim_ids = np.where(micro_sim_states == 1)[0] for active_id in 
active_sim_ids: self._micro_sims_active_steps[active_id] += 1 - micro_sims_output = self.solve_micro_simulations(micro_sims_input, micro_sim_states) + micro_sims_output = self.solve_micro_simulations_with_adaptivity( + micro_sims_input, is_sim_active, sim_is_associated_to) + else: + micro_sims_output = self.solve_micro_simulations(micro_sims_input) self.write_data_to_precice(micro_sims_output) @@ -489,16 +510,16 @@ def solve(self): # Revert all micro simulations to checkpoints if required if self._interface.is_action_required(precice.action_read_iteration_checkpoint()): - for micro_sim in self._micro_sims: - micro_sim.reload_checkpoint() + for i in range(self._local_number_of_sims): + self._micro_sims[i].set_state(sim_states_cp[i]) n = n_checkpoint t = t_checkpoint if self._is_adaptivity_on: if not self._is_adaptivity_required_in_every_implicit_iteration: similarity_dists = np.copy(similarity_dists_cp) - micro_sim_states = np.copy(micro_sim_states_cp) - self._micro_sims = micro_sims_cp.copy() + is_sim_active = np.copy(is_sim_active_cp) + sim_is_associated_to = np.copy(sim_is_associated_to_cp) self._interface.mark_action_fulfilled( precice.action_read_iteration_checkpoint()) diff --git a/micro_manager/micro_simulation.py b/micro_manager/micro_simulation.py index e6c9b16a..102eb16c 100644 --- a/micro_manager/micro_simulation.py +++ b/micro_manager/micro_simulation.py @@ -18,89 +18,14 @@ def create_micro_problem_class(base_micro_simulation): Definition of class MicroSimulation defined in this function. """ class MicroSimulation(base_micro_simulation): - def __init__(self, local_id, global_id): + def __init__(self, global_id): base_micro_simulation.__init__(self) - self._local_id = local_id self._global_id = global_id - self._is_active = False # Simulation is created in an inactive state - - # Only defined when simulation is inactive - self._associated_active_local_id = None - self._associated_active_global_id = None - - # Only defined when simulation is active - self._associated_inactive_local_ids = None - self._associated_inactive_global_ids = None - - def get_local_id(self) -> int: - return self._local_id def get_global_id(self) -> int: return self._global_id - def set_local_id(self, local_id) -> None: - self._local_id = local_id - def set_global_id(self, global_id) -> None: self._global_id = global_id - def activate(self) -> None: - self._is_active = True - - def deactivate(self) -> None: - self._is_active = False - - def is_active(self) -> bool: - return self._is_active - - def is_associated_to_active_sim(self, similar_active_local_id: int, similar_active_global_id: int) -> None: - assert not self._is_active, "Micro simulation {} is active and hence cannot be associated to another active simulation".format( - self._global_id) - self._associated_active_local_id = similar_active_local_id - self._associated_active_global_id = similar_active_global_id - - def get_associated_active_local_id(self) -> int: - assert not self._is_active, "Micro simulation {} is active and hence cannot have an associated active local ID".format( - self._global_id) - return self._associated_active_local_id - - def get_associated_active_global_id(self) -> int: - assert not self._is_active, "Micro simulation {} is active and hence cannot have an associated active global ID".format( - self._global_id) - return self._associated_active_global_id - - def is_associated_to_inactive_sim(self, similar_inactive_local_id: int, - similar_inactive_global_id: int) -> None: - assert self._is_active, "Micro simulation {} is inactive 
and hence cannot be associated to an inactive simulation".format( - self._global_id) - self._associated_inactive_local_ids.append(similar_inactive_local_id) - self._associated_inactive_global_ids.append(similar_inactive_global_id) - - def is_associated_to_inactive_sims(self, similar_inactive_local_ids: list, - similar_inactive_global_ids: list) -> None: - assert self._is_active, "Micro simulation {} is inactive and hence cannot be associated to inactive simulations".format( - self._global_id) - self._associated_inactive_local_ids = similar_inactive_local_ids - self._associated_inactive_global_ids = similar_inactive_global_ids - - def get_associated_inactive_local_id(self) -> int: - assert self._is_active, "Micro simulation {} is inactive and hence cannot have an associated inactive local ID".format( - self._global_id) - return self._associated_inactive_local_ids[0] - - def get_associated_inactive_global_id(self) -> int: - assert self._is_active, "Micro simulation {} is inactive and hence cannot have an associated inactive global ID".format( - self._global_id) - return self._associated_inactive_global_ids[0] - - def get_associated_inactive_local_ids(self) -> list: - assert self._is_active, "Micro simulation {} is inactive and hence cannot have associated inactive local IDs".format( - self._global_id) - return self._associated_inactive_local_ids - - def get_associated_inactive_global_ids(self) -> list: - assert self._is_active, "Micro simulation {} is active and hence cannot have associated inactive global IDs".format( - self._global_id) - return self._associated_inactive_global_ids - return MicroSimulation diff --git a/tests/integration/test_unit_cube_dummy/clean-test.sh b/tests/integration/test_unit_cube_dummy/clean-test.sh index 866dffb5..5a0def56 100755 --- a/tests/integration/test_unit_cube_dummy/clean-test.sh +++ b/tests/integration/test_unit_cube_dummy/clean-test.sh @@ -7,3 +7,4 @@ rm -fv *.out rm -fv *.err rm -fv output/*.vtu rm -fv output/*.pvtu +rm -r -fv __pycache__ diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity-parallel.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity-parallel.json new file mode 100644 index 00000000..4edbd391 --- /dev/null +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity-parallel.json @@ -0,0 +1,21 @@ +{ + "micro_file_name": "micro_dummy", + "coupling_params": { + "config_file_name": "precice-config.xml", + "macro_mesh_name": "macro-cube-mesh", + "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, + "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} + }, + "simulation_params": { + "macro_domain_bounds": [0, 1, 0, 1, 0, 1], + "decomposition": [2, 1, 1], + "adaptivity": { + "type": "global", + "data": ["macro-scalar-data", "micro-vector-data"], + "history_param": 0.5, + "coarsening_constant": 0.3, + "refining_constant": 0.4, + "every_implicit_iteration": "True" + } + } +} diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity.json new file mode 100644 index 00000000..808bcae4 --- /dev/null +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity.json @@ -0,0 +1,20 @@ +{ + "micro_file_name": "micro_dummy", + "coupling_params": { + "config_file_name": "precice-config.xml", + "macro_mesh_name": "macro-cube-mesh", + "read_data_names": 
{"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, + "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} + }, + "simulation_params": { + "macro_domain_bounds": [0, 1, 0, 1, 0, 1], + "adaptivity": { + "type": "global", + "data": ["macro-scalar-data", "macro-vector-data"], + "history_param": 0.5, + "coarsening_constant": 0.3, + "refining_constant": 0.4, + "every_implicit_iteration": "True" + } + } +} diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-adaptivity.json b/tests/integration/test_unit_cube_dummy/micro-manager-config-local-adaptivity.json similarity index 90% rename from tests/integration/test_unit_cube_dummy/micro-manager-config-adaptivity.json rename to tests/integration/test_unit_cube_dummy/micro-manager-config-local-adaptivity.json index 99392c61..f6884e83 100644 --- a/tests/integration/test_unit_cube_dummy/micro-manager-config-adaptivity.json +++ b/tests/integration/test_unit_cube_dummy/micro-manager-config-local-adaptivity.json @@ -16,8 +16,5 @@ "refining_constant": 0.4, "every_implicit_iteration": "True" } - }, - "diagnostics": { - "output_micro_sim_solve_time": "True" } } diff --git a/tests/integration/test_unit_cube_dummy/micro_dummy.py b/tests/integration/test_unit_cube_dummy/micro_dummy.py index 90807589..59bef26c 100644 --- a/tests/integration/test_unit_cube_dummy/micro_dummy.py +++ b/tests/integration/test_unit_cube_dummy/micro_dummy.py @@ -21,14 +21,15 @@ def initialize(self): def solve(self, macro_data, dt): assert dt != 0 - self._micro_vector_data = macro_data["macro-vector-data"] + 1 - self._micro_scalar_data = macro_data["macro-scalar-data"] + 1 + self._micro_vector_data = macro_data["macro-vector-data"] + self._micro_scalar_data = macro_data["macro-scalar-data"] return {"micro-scalar-data": self._micro_scalar_data, "micro-vector-data": self._micro_vector_data} - def save_checkpoint(self): - self._checkpoint = self._micro_scalar_data + def get_state(self): + return [self._micro_scalar_data, self._micro_vector_data] - def reload_checkpoint(self): - self._micro_scalar_data = self._checkpoint + def set_state(self, state): + self._micro_scalar_data = state[0] + self._micro_vector_data = state[0] diff --git a/tests/integration/test_unit_cube_dummy/precice-config.xml b/tests/integration/test_unit_cube_dummy/precice-config.xml index a5072746..e98d801f 100644 --- a/tests/integration/test_unit_cube_dummy/precice-config.xml +++ b/tests/integration/test_unit_cube_dummy/precice-config.xml @@ -35,7 +35,7 @@ - + @@ -50,8 +50,8 @@ - - + + diff --git a/tests/integration/test_unit_cube_dummy/unit_cube_macro.py b/tests/integration/test_unit_cube_dummy/unit_cube_macro.py index 5d607c06..1e65ac43 100644 --- a/tests/integration/test_unit_cube_dummy/unit_cube_macro.py +++ b/tests/integration/test_unit_cube_dummy/unit_cube_macro.py @@ -12,6 +12,8 @@ def main(): n = n_checkpoint = 0 t = t_checkpoint = 0 + t_end = 10 + # preCICE setup interface = precice.Interface("macro-cube", "precice-config.xml", 0, 1) @@ -24,7 +26,7 @@ def main(): write_data_names = {"macro-scalar-data": 0, "macro-vector-data": 1} # Coupling mesh - unit cube with 5 points in each direction - np_axis = 5 + np_axis = 2 x_coords, y_coords, z_coords = np.meshgrid( np.linspace(0, 1, np_axis), np.linspace(0, 1, np_axis), @@ -37,8 +39,7 @@ def main(): write_scalar_data = np.zeros(nv) write_vector_data = np.zeros((nv, interface.get_dimensions())) - scalar_value = 1.0 - vector_value = [2.0, 3.0, 4.0] + # Define unit cube coordinates for z in range(np_axis): for 
y in range(np_axis): for x in range(np_axis): @@ -46,6 +47,14 @@ def main(): coords[n, 0] = x_coords[x, y, z] coords[n, 1] = y_coords[x, y, z] coords[n, 2] = z_coords[x, y, z] + + # Define initial data to write to preCICE + scalar_value = 1.0 + vector_value = [2.0, 3.0, 4.0] + for z in range(np_axis): + for y in range(np_axis): + for x in range(np_axis): + n = x + y * np_axis + z * np_axis * np_axis write_scalar_data[n] = scalar_value write_vector_data[n, 0] = vector_value[0] write_vector_data[n, 1] = vector_value[1] @@ -99,6 +108,19 @@ def main(): write_scalar_data = read_scalar_data + 1 write_vector_data = read_vector_data + 1 + # Define new data to write to preCICE midway through the simulation + if t == t_end / 2: + scalar_value = 1.0 + vector_value = [2.0, 3.0, 4.0] + for z in range(np_axis): + for y in range(np_axis): + for x in range(np_axis): + n = x + y * np_axis + z * np_axis * np_axis + write_scalar_data[n] = scalar_value + write_vector_data[n, 0] = vector_value[0] + write_vector_data[n, 1] = vector_value[1] + write_vector_data[n, 2] = vector_value[2] + # Write data to preCICE for name, dim in write_data_names.items(): if dim == 0: diff --git a/tests/unit/micro-manager-unit-test-config.json b/tests/unit/micro-manager-config.json similarity index 70% rename from tests/unit/micro-manager-unit-test-config.json rename to tests/unit/micro-manager-config.json index ccc829d7..42a4181f 100644 --- a/tests/unit/micro-manager-unit-test-config.json +++ b/tests/unit/micro-manager-config.json @@ -1,8 +1,8 @@ { "micro_file_name": "test_micro_manager", "coupling_params": { - "config_file_name": "./precice-config.xml", - "macro_mesh_name": "macro-mesh", + "config_file_name": "dummy-config.xml", + "macro_mesh_name": "dummy-macro-mesh", "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"}, "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"} }, @@ -14,11 +14,12 @@ "history_param": 0.5, "coarsening_constant": 0.3, "refining_constant": 0.4, - "every_implicit_iteration": "False" + "every_implicit_iteration": "False", + "similarity_measure": "L1" } }, "diagnostics": { - "micro_output_n": 10, - "output_micro_sim_solve_time": "True" + "output_micro_sim_solve_time": "True", + "micro_output_n": 10 } } diff --git a/tests/unit/micro-manager-unit-test-adaptivity-config.json b/tests/unit/micro-manager-unit-test-adaptivity-config.json deleted file mode 100644 index cc04492b..00000000 --- a/tests/unit/micro-manager-unit-test-adaptivity-config.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "micro_file_name": "dummy", - "coupling_params": { - "config_file_name": "dummy", - "macro_mesh_name": "dummy", - "read_data_names": {}, - "write_data_names": {} - }, - "simulation_params": { - "macro_domain_bounds": [], - "adaptivity": { - "type": "local", - "data": [], - "history_param": 0.5, - "coarsening_constant": 0.3, - "refining_constant": 0.4, - "every_implicit_iteration": "False", - "similarity_measure": "L1" - } - } -} diff --git a/tests/unit/test_adaptivity.py b/tests/unit/test_adaptivity.py index fab61063..d91765bd 100644 --- a/tests/unit/test_adaptivity.py +++ b/tests/unit/test_adaptivity.py @@ -1,15 +1,16 @@ from unittest import TestCase -from micro_manager.adaptivity.local_adaptivity import LocalAdaptivityCalculator +from unittest.mock import MagicMock from micro_manager.adaptivity.adaptivity import AdaptivityCalculator +from micro_manager.adaptivity.local_adaptivity import LocalAdaptivityCalculator +from micro_manager.adaptivity.global_adaptivity import 
GlobalAdaptivityCalculator from micro_manager.config import Config import numpy as np +from math import exp -class TestAdaptivity(TestCase): +class TestLocalAdaptivity(TestCase): def setUp(self): - self._adaptivity_controller = LocalAdaptivityCalculator( - Config("micro-manager-unit-test-adaptivity-config.json"), range(5), 5) self._number_of_sims = 5 self._dt = 0.1 self._dim = 3 @@ -45,61 +46,137 @@ def setUp(self): self._coarse_const = 0.5 self._coarse_tol = 0.2 - def test_get_similarity_dists(self): - expected_similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) + self._data_diff = np.zeros((self._number_of_sims, self._number_of_sims)) for i in range(self._number_of_sims): for j in range(self._number_of_sims): - similarity_dist = abs(self._micro_scalar_data[i] - self._micro_scalar_data[j]) - similarity_dist += abs(self._macro_scalar_data[i] - self._macro_scalar_data[j]) + dist = abs(self._micro_scalar_data[i] - self._micro_scalar_data[j]) + dist += abs(self._macro_scalar_data[i] - self._macro_scalar_data[j]) for d in range(self._dim): - similarity_dist += abs(self._micro_vector_data[i, d] - self._micro_vector_data[j, d]) - similarity_dist += abs(self._macro_vector_data[i, d] - self._macro_vector_data[j, d]) - expected_similarity_dists[i, j] = self._dt * similarity_dist + dist += abs(self._micro_vector_data[i, d] - self._micro_vector_data[j, d]) + dist += abs(self._macro_vector_data[i, d] - self._macro_vector_data[j, d]) + self._data_diff[i, j] = dist - actual_similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) - actual_similarity_dists = self._adaptivity_controller.get_similarity_dists( - self._dt, actual_similarity_dists, self._micro_scalar_data) - actual_similarity_dists = self._adaptivity_controller.get_similarity_dists( - self._dt, actual_similarity_dists, self._micro_vector_data) - actual_similarity_dists = self._adaptivity_controller.get_similarity_dists( - self._dt, actual_similarity_dists, self._macro_scalar_data) - actual_similarity_dists = self._adaptivity_controller.get_similarity_dists( - self._dt, actual_similarity_dists, self._macro_vector_data) + self._similarity_dists = self._dt * self._data_diff - self.assertTrue(np.array_equal(expected_similarity_dists, actual_similarity_dists)) - - def test_update_active_micro_sims(self): - # Third and fifth micro sim are active, rest are deactivate - expected_micro_sim_states = np.array([0, 0, 1, 0, 1]) + def test_get_similarity_dists(self): + """ + Test base functionality of calculating the similarity distance matrix + """ + configurator = MagicMock() + configurator.get_adaptivity_similarity_measure = MagicMock(return_value='L1') + adaptivity_controller = AdaptivityCalculator(configurator, logger=MagicMock()) + adaptivity_controller._hist_param = 0.5 + adaptivity_controller._adaptivity_data_names = [ + "micro-scalar-data", + "micro-vector-data", + "macro-scalar-data", + "macro-vector-data"] similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) - for i in range(self._number_of_sims): - for j in range(self._number_of_sims): - similarity_dist = abs(self._micro_scalar_data[i] - self._micro_scalar_data[j]) - similarity_dist += abs(self._macro_scalar_data[i] - self._macro_scalar_data[j]) - for d in range(self._dim): - similarity_dist += abs(self._micro_vector_data[i, d] - self._micro_vector_data[j, d]) - similarity_dist += abs(self._macro_vector_data[i, d] - self._macro_vector_data[j, d]) - similarity_dists[i, j] = self._dt * similarity_dist - actual_micro_sim_states = 
np.array([1, 1, 1, 1, 1]) # Activate all micro sims before calling functionality + adaptivity_data = dict() + adaptivity_data["micro-scalar-data"] = self._micro_scalar_data + adaptivity_data["micro-vector-data"] = self._micro_vector_data + adaptivity_data["macro-scalar-data"] = self._macro_scalar_data + adaptivity_data["macro-vector-data"] = self._macro_vector_data + + similarity_dists = adaptivity_controller._get_similarity_dists( + self._dt, self._similarity_dists, adaptivity_data) + + expected_similarity_dists = exp(-adaptivity_controller._hist_param * self._dt) * \ + self._similarity_dists + self._dt * self._data_diff + + self.assertTrue(np.array_equal(expected_similarity_dists, similarity_dists)) + + def test_update_active_sims(self): + """ + Test base functionality of updating active simulations + """ + configurator = MagicMock() + configurator.get_adaptivity_similarity_measure = MagicMock(return_value="L1") + adaptivity_controller = AdaptivityCalculator(configurator, logger=MagicMock()) + adaptivity_controller._refine_const = 0.4 + adaptivity_controller._coarse_const = 0.3 + adaptivity_controller._adaptivity_data_names = ["macro-scalar-data", "macro-vector-data"] + + # Third and fifth micro sim are active, rest are inactive + expected_is_sim_active = np.array([False, False, True, False, True]) + + is_sim_active = np.array([True, True, True, True, True]) # Activate all micro sims before calling functionality class MicroSimulation(): - def deactivate(self): - pass + pass dummy_micro_sims = [] - for i in range(self._number_of_sims): + for _ in range(self._number_of_sims): dummy_micro_sims.append(MicroSimulation()) - actual_micro_sim_states = self._adaptivity_controller.update_active_micro_sims( - similarity_dists, actual_micro_sim_states, dummy_micro_sims) + is_sim_active = adaptivity_controller._update_active_sims( + self._similarity_dists, is_sim_active) + + self.assertTrue(np.array_equal(expected_is_sim_active, is_sim_active)) + + def test_associate_active_to_inactive(self): + configurator = MagicMock() + configurator.get_adaptivity_similarity_measure = MagicMock(return_value="L1") + adaptivity_controller = LocalAdaptivityCalculator(configurator, logger=MagicMock()) + adaptivity_controller._refine_const = 0.4 + adaptivity_controller._coarse_const = 0.3 + adaptivity_controller._adaptivity_data_names = ["macro-scalar-data", "macro-vector-data"] + + is_sim_active = np.array([True, False, False, True, False]) + expected_sim_is_associated_to = np.array([-2, 0, 0, -2, 3]) + + class MicroSimulation(): + def __init__(self, global_id): + self._global_id = global_id + + def get_global_id(self): + return self._global_id + + dummy_micro_sims = [] + for i in range(self._number_of_sims): + dummy_micro_sims.append(MicroSimulation(i)) + + sim_is_associated_to = np.array([-2, -2, -2, -2, -2]) + + sim_is_associated_to = adaptivity_controller._associate_inactive_to_active( + self._similarity_dists, is_sim_active, sim_is_associated_to) + + self.assertTrue(np.array_equal(expected_sim_is_associated_to, sim_is_associated_to)) + + def test_adaptivity_norms(self): + calc = AdaptivityCalculator(Config('micro-manager-config.json'), 0) + + fake_data = np.array([[1], [2], [3]]) + self.assertTrue(np.allclose(calc._l1(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) + # norm taken over last axis -> same as before + self.assertTrue(np.allclose(calc._l2(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) + self.assertTrue(np.allclose(calc._l1rel(fake_data), np.array( + [[0, 0.5, 2 / 3], [0.5, 0, 1 / 
3], [2 / 3, 1 / 3, 0]]))) + self.assertTrue(np.allclose(calc._l2rel(fake_data), np.array( + [[0, 0.5, 2 / 3], [0.5, 0, 1 / 3], [2 / 3, 1 / 3, 0]]))) + + fake_2d_data = np.array([[1, 2], [3, 4]]) + self.assertTrue(np.allclose(calc._l1(fake_2d_data), np.array([[0, 4], [4, 0]]))) + self.assertTrue(np.allclose(calc._l2(fake_2d_data), np.array([[0, np.sqrt((1 - 3)**2 + (2 - 4)**2)], + [np.sqrt((1 - 3)**2 + (2 - 4)**2), 0]]))) + self.assertTrue(np.allclose(calc._l1rel(fake_2d_data), np.array( + [[0, abs((1 - 3) / max(1, 3) + (2 - 4) / max(2, 4))], [abs((1 - 3) / max(1, 3) + (2 - 4) / max(2, 4)), 0]]))) + self.assertTrue(np.allclose(calc._l2rel(fake_2d_data), np.array([[0, np.sqrt( + (1 - 3)**2 / max(1, 3)**2 + (2 - 4)**2 / max(2, 4)**2)], [np.sqrt((1 - 3)**2 / max(1, 3)**2 + (2 - 4)**2 / max(2, 4)**2), 0]]))) - self.assertTrue(np.array_equal(expected_micro_sim_states, actual_micro_sim_states)) + def test_update_inactive_sims_local_adaptivity(self): + configurator = MagicMock() + configurator.get_adaptivity_similarity_measure = MagicMock(return_value="L1") + adaptivity_controller = LocalAdaptivityCalculator(configurator, logger=MagicMock()) + adaptivity_controller._refine_const = 0.4 + adaptivity_controller._coarse_const = 0.3 + adaptivity_controller._adaptivity_data_names = ["macro-scalar-data", "macro-vector-data"] - def test_update_inactive_micro_sims(self): # Third and fifth micro sim are active, rest are deactivate - expected_micro_sim_states = np.array([0, 1, 0, 1, 0]) + expected_is_sim_active = np.array([True, False, False, True, False]) + expected_sim_is_associated_to = np.array([-2, 0, 0, -2, 3]) similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) for i in range(self._number_of_sims): @@ -111,85 +188,93 @@ def test_update_inactive_micro_sims(self): similarity_dist += abs(self._macro_vector_data[i, d] - self._macro_vector_data[j, d]) similarity_dists[i, j] = self._dt * similarity_dist - actual_micro_sim_states = np.array([0, 1, 0, 0, 0]) # Activate all micro sims before calling functionality + is_sim_active = np.array([True, False, False, False, False]) + sim_is_associated_to = np.array([-2, 0, 0, 0, 3]) class MicroSimulation(): - def activate(self): - pass - - def get_local_id(self): - return 1 - def get_global_id(self): return 1 - def set_local_id(self, local_id): + def set_global_id(self, global_id): pass - def set_global_id(self, global_id): + def set_state(self, state): pass - def get_associated_active_local_id(self): - return 1 + def get_state(self): + pass dummy_micro_sims = [] for i in range(self._number_of_sims): dummy_micro_sims.append(MicroSimulation()) - actual_micro_sim_states = self._adaptivity_controller.update_inactive_micro_sims( - similarity_dists, actual_micro_sim_states, dummy_micro_sims) - - self.assertTrue(np.array_equal(expected_micro_sim_states, actual_micro_sim_states)) + is_sim_active, sim_is_associated_to = adaptivity_controller._update_inactive_sims( + similarity_dists, is_sim_active, sim_is_associated_to, dummy_micro_sims) + + self.assertTrue(np.array_equal(expected_is_sim_active, is_sim_active)) + self.assertTrue(np.array_equal(expected_sim_is_associated_to, sim_is_associated_to)) + + def test_update_inactive_sims_global_adaptivity(self): + is_sim_on_this_rank = [False, False, False, True, True] # from the perspective of rank 0 + rank_of_sim = [0, 0, 0, 1, 1] + global_ids = [0, 1, 2, 3, 4] + configurator = MagicMock() + configurator.get_adaptivity_similarity_measure = MagicMock(return_value="L1") + adaptivity_controller = 
GlobalAdaptivityCalculator( + configurator, + MagicMock(), + is_sim_on_this_rank, + rank_of_sim, + global_ids, + comm=MagicMock(), + rank=1) + adaptivity_controller._refine_const = 0.4 + adaptivity_controller._coarse_const = 0.3 - def test_associate_active_to_inactive(self): - micro_sim_states = np.array([0, 0, 1, 0, 1]) + # Third and fifth micro sim are active, rest are deactivate + expected_is_sim_active = np.array([True, False, False, True, False]) + expected_sim_is_associated_to = np.array([-2, 0, 0, -2, 3]) - similarity_dists = np.zeros((self._number_of_sims, self._number_of_sims)) - for i in range(self._number_of_sims): - for j in range(self._number_of_sims): - similarity_dist = abs(self._micro_scalar_data[i] - self._micro_scalar_data[j]) - similarity_dist += abs(self._macro_scalar_data[i] - self._macro_scalar_data[j]) - for d in range(self._dim): - similarity_dist += abs(self._micro_vector_data[i, d] - self._micro_vector_data[j, d]) - similarity_dist += abs(self._macro_vector_data[i, d] - self._macro_vector_data[j, d]) - similarity_dists[i, j] = self._dt * similarity_dist + is_sim_active = np.array([True, False, False, False, False]) + sim_is_associated_to = np.array([-2, 0, 0, 0, 3]) class MicroSimulation(): - def is_associated_to_active_sim(self, local_id, global_id): - self._associated_active_local_id = local_id - self._associated_active_global_id = global_id + def get_global_id(self): + return 1 - def get_associated_active_local_id(self): - return self._associated_active_local_id + def set_global_id(self, global_id): + pass + + def set_state(self, state): + pass + + def get_state(self): + pass + + class fake_MPI_request(): + def __init__(self, state, global_id) -> None: + self._state = state + self._global_id = global_id + + def wait(self): + return self._state, self._global_id + + fake_requests = [fake_MPI_request(None, 0)] + + adaptivity_controller._p2p_comm = MagicMock(return_value=fake_requests) dummy_micro_sims = [] for i in range(self._number_of_sims): dummy_micro_sims.append(MicroSimulation()) - self._adaptivity_controller.associate_inactive_to_active(similarity_dists, micro_sim_states, dummy_micro_sims) + is_sim_active, sim_is_associated_to = adaptivity_controller._update_inactive_sims( + self._similarity_dists, is_sim_active, sim_is_associated_to, dummy_micro_sims) - self.assertEqual(dummy_micro_sims[0].get_associated_active_local_id(), 2) - self.assertEqual(dummy_micro_sims[1].get_associated_active_local_id(), 2) - self.assertEqual(dummy_micro_sims[3].get_associated_active_local_id(), 4) + self.assertTrue(np.array_equal(expected_is_sim_active, is_sim_active)) + self.assertTrue(np.array_equal(expected_sim_is_associated_to, sim_is_associated_to)) - def test_adaptivity_norms(self): - c = Config('micro-manager-unit-test-adaptivity-config.json') - calc = AdaptivityCalculator(c, [0, 1, 2, 3, 4]) + def test_communicate_micro_output(self): + pass - fake_data = np.array([[1], [2], [3]]) - self.assertTrue(np.allclose(calc._l1(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) - # norm taken over last axis -> same as before - self.assertTrue(np.allclose(calc._l2(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) - self.assertTrue(np.allclose(calc._l1rel(fake_data), np.array( - [[0, 0.5, 2 / 3], [0.5, 0, 1 / 3], [2 / 3, 1 / 3, 0]]))) - self.assertTrue(np.allclose(calc._l2rel(fake_data), np.array( - [[0, 0.5, 2 / 3], [0.5, 0, 1 / 3], [2 / 3, 1 / 3, 0]]))) - - fake_2d_data = np.array([[1, 2], [3, 4]]) - self.assertTrue(np.allclose(calc._l1(fake_2d_data), np.array([[0, 
4], [4, 0]]))) - self.assertTrue(np.allclose(calc._l2(fake_2d_data), np.array([[0, np.sqrt((1 - 3)**2 + (2 - 4)**2)], - [np.sqrt((1 - 3)**2 + (2 - 4)**2), 0]]))) - self.assertTrue(np.allclose(calc._l1rel(fake_2d_data), np.array( - [[0, abs((1 - 3) / max(1, 3) + (2 - 4) / max(2, 4))], [abs((1 - 3) / max(1, 3) + (2 - 4) / max(2, 4)), 0]]))) - self.assertTrue(np.allclose(calc._l2rel(fake_2d_data), np.array([[0, np.sqrt( - (1 - 3)**2 / max(1, 3)**2 + (2 - 4)**2 / max(2, 4)**2)], [np.sqrt((1 - 3)**2 / max(1, 3)**2 + (2 - 4)**2 / max(2, 4)**2), 0]]))) + def test_p2p_comm(self): + pass diff --git a/tests/unit/test_micro_manager.py b/tests/unit/test_micro_manager.py index 04f18171..e2dede53 100644 --- a/tests/unit/test_micro_manager.py +++ b/tests/unit/test_micro_manager.py @@ -35,15 +35,18 @@ def setUp(self): self.macro_bounds = [0.0, 25.0, 0.0, 25.0, 0.0, 25.0] def test_micromanager_constructor(self): - manager = micro_manager.MicroManager('micro-manager-unit-test-config.json') + manager = micro_manager.MicroManager('micro-manager-config.json') + self.assertListEqual(manager._macro_bounds, self.macro_bounds) self.assertDictEqual(manager._read_data_names, self.fake_read_data_names) self.assertDictEqual(self.fake_write_data_names, manager._write_data_names) self.assertEqual(manager._micro_n_out, 10) def test_initialize(self): - manager = micro_manager.MicroManager('micro-manager-unit-test-config.json') + manager = micro_manager.MicroManager('micro-manager-config.json') + manager.initialize() + self.assertEqual(manager._dt, 0.1) # from Interface.initialize self.assertEqual(manager._global_number_of_micro_sims, 4) self.assertListEqual(manager._macro_bounds, self.macro_bounds) @@ -54,31 +57,35 @@ def test_initialize(self): self.assertDictEqual(self.fake_write_data_names, manager._write_data_names) def test_read_write_data_from_precice(self): - manager = micro_manager.MicroManager('micro-manager-unit-test-config.json') + manager = micro_manager.MicroManager('micro-manager-config.json') + manager.write_data_to_precice(self.fake_write_data) read_data = manager.read_data_from_precice() - for data, fake_data in zip(read_data, self.fake_write_data): + + print("read_data: {}".format(read_data)) + + for data, fake_data in zip(read_data, self.fake_read_data): self.assertEqual(data["macro-scalar-data"], 1) self.assertListEqual(data["macro-vector-data"].tolist(), - fake_data["micro-vector-data"].tolist()) + fake_data["macro-vector-data"].tolist()) def test_solve_mico_sims(self): - manager = micro_manager.MicroManager('micro-manager-unit-test-config.json') - manager._local_number_of_micro_sims = 4 + manager = micro_manager.MicroManager('micro-manager-config.json') + manager._local_number_of_sims = 4 manager._micro_sims = [MicroSimulation() for _ in range(4)] manager._micro_sims_active_steps = np.zeros(4, dtype=np.int32) - micro_sims_output = manager.solve_micro_simulations(self.fake_read_data, np.array([True, True, True, True])) + micro_sims_output = manager.solve_micro_simulations(self.fake_read_data) for data, fake_data in zip(micro_sims_output, self.fake_write_data): self.assertEqual(data["micro-scalar-data"], 2) self.assertListEqual(data["micro-vector-data"].tolist(), (fake_data["micro-vector-data"] + 1).tolist()) def test_config(self): - config = micro_manager.Config('micro-manager-unit-test-config.json') + config = micro_manager.Config('micro-manager-config.json') - self.assertEqual(config._config_file_name.split("/")[-1], "precice-config.xml") + self.assertEqual(config._config_file_name.split("/")[-1], 
"dummy-config.xml") self.assertEqual(config._micro_file_name, "test_micro_manager") - self.assertEqual(config._macro_mesh_name, "macro-mesh") + self.assertEqual(config._macro_mesh_name, "dummy-macro-mesh") self.assertEqual(config._micro_output_n, 10) self.assertDictEqual(config._read_data_names, self.fake_read_data_names) self.assertDictEqual(self.fake_write_data_names, config._write_data_names) From b0ae05db61e9e85d558be78c80608117117d63fc Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 7 Jul 2023 11:39:37 +0200 Subject: [PATCH 63/87] Remove print statements from debugging and rename class creation functionality --- micro_manager/micro_manager.py | 22 ++++++++-------------- micro_manager/micro_simulation.py | 18 ++++++++++-------- 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index cb6fea63..7cd0f92f 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -15,7 +15,7 @@ from typing import Dict from .config import Config -from .micro_simulation import create_micro_problem_class +from .micro_simulation import create_simulation_class from .adaptivity.local_adaptivity import LocalAdaptivityCalculator from .adaptivity.global_adaptivity import GlobalAdaptivityCalculator from .domain_decomposition import DomainDecomposer @@ -83,10 +83,10 @@ def __init__(self, config_file: str) -> None: self._is_micro_solve_time_required = self._config.write_micro_solve_time() - self._local_number_of_sims = None - self._global_number_of_micro_sims = None + self._local_number_of_sims = 0 + self._global_number_of_micro_sims = 0 self._is_rank_empty = False - self._dt = None + self._dt = 0 self._mesh_vertex_ids = None # IDs of macro vertices as set by preCICE self._micro_n_out = self._config.get_micro_output_n() @@ -95,7 +95,7 @@ def __init__(self, config_file: str) -> None: if self._is_adaptivity_on: self._number_of_micro_sims_for_adaptivity = 0 - self._data_for_adaptivity = dict() + self._data_for_adaptivity: Dict[str, np.ndarray] = dict() self._adaptivity_type = self._config.get_adaptivity_type() self._adaptivity_data_names = self._config.get_data_for_adaptivity() @@ -175,6 +175,7 @@ def initialize(self) -> None: sim_id += 1 self._micro_sims = [None] * self._local_number_of_sims # DECLARATION + micro_sims_output = [None] * self._local_number_of_sims micro_problem = getattr( __import__( @@ -185,7 +186,7 @@ def initialize(self) -> None: if self._is_adaptivity_on: # Create micro simulation objects for i in range(self._local_number_of_sims): - self._micro_sims[i] = create_micro_problem_class( + self._micro_sims[i] = create_simulation_class( micro_problem)(self._global_ids_of_local_sims[i]) # Create a map of micro simulation global IDs and the ranks on which they are @@ -220,9 +221,7 @@ def initialize(self) -> None: else: for i in range(self._local_number_of_sims): self._micro_sims[i] = ( - create_micro_problem_class(micro_problem)(self._global_ids_of_local_sims[i])) - - micro_sims_output = list(range(self._local_number_of_sims)) + create_simulation_class(micro_problem)(self._global_ids_of_local_sims[i])) # Initialize micro simulations if initialize() method exists if hasattr(micro_problem, 'initialize') and callable(getattr(micro_problem, 'initialize')): @@ -270,13 +269,10 @@ def read_data_from_precice(self) -> list: for name in self._read_data_names.keys(): read_data[name] = [] - print("read_data 1: {}".format(read_data)) - for name, is_data_vector in self._read_data_names.items(): if is_data_vector: 
read_data.update({name: self._interface.read_block_vector_data( self._read_data_ids[name], self._mesh_vertex_ids)}) - print("After scalar data update: {}".format(read_data)) else: read_data.update({name: self._interface.read_block_scalar_data( self._read_data_ids[name], self._mesh_vertex_ids)}) @@ -285,8 +281,6 @@ def read_data_from_precice(self) -> list: if name in self._adaptivity_macro_data_names: self._data_for_adaptivity[name] = read_data[name] - print("read_data 2: {}".format(read_data)) - return [dict(zip(read_data, t)) for t in zip(*read_data.values())] def write_data_to_precice(self, micro_sims_output: list) -> None: diff --git a/micro_manager/micro_simulation.py b/micro_manager/micro_simulation.py index 102eb16c..df505bb3 100644 --- a/micro_manager/micro_simulation.py +++ b/micro_manager/micro_simulation.py @@ -1,11 +1,13 @@ """ -Functionality to create MicroSimulation class objects which inherit from user provided base_micro_simulation class. +This file provides a function which creates a class Simulation. This class inherits from the user-provided +class MicroSimulation. A global ID member variable is defined for the class Simulation, which ensures that each +created object is uniquely identifiable in a global setting. """ -def create_micro_problem_class(base_micro_simulation): +def create_simulation_class(micro_simulation_class): """ - Creates a class MicroSimulation which inherits from the class of the micro simulation. + Creates a class Simulation which inherits from the class of the micro simulation. Parameters ---------- @@ -14,12 +16,12 @@ def create_micro_problem_class(base_micro_simulation): Returns ------- - MicroSimulation : class - Definition of class MicroSimulation defined in this function. + Simulation : class + Definition of class Simulation defined in this function. """ - class MicroSimulation(base_micro_simulation): + class Simulation(micro_simulation_class): def __init__(self, global_id): - base_micro_simulation.__init__(self) + micro_simulation_class.__init__(self) self._global_id = global_id def get_global_id(self) -> int: @@ -28,4 +30,4 @@ def get_global_id(self) -> int: def set_global_id(self, global_id) -> None: self._global_id = global_id - return MicroSimulation + return Simulation From c33e2125ac0744d19b218abcd3620c1f8760b25a Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 7 Jul 2023 14:58:12 +0200 Subject: [PATCH 64/87] Split methods in micro_manager.py into public and private methods --- micro_manager/micro_manager.py | 240 ++++++++++++++++--------------- tests/unit/test_micro_manager.py | 6 +- 2 files changed, 127 insertions(+), 119 deletions(-) diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 7cd0f92f..b97a0d90 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -26,7 +26,7 @@ class MicroManager: def __init__(self, config_file: str) -> None: """ - Constructor of MicroManager class. 
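create_simulation_class wraps a user-provided micro simulation class so that every object carries a global ID. A minimal usage sketch (the user class and its solve body are illustrative only):

    from micro_manager.micro_simulation import create_simulation_class

    class MicroSimulation:  # user-provided class, illustrative
        def __init__(self):
            self._value = 0.0

        def solve(self, macro_data, dt):
            return {"micro-scalar-data": self._value}

    Simulation = create_simulation_class(MicroSimulation)
    sim = Simulation(4)  # behaves like MicroSimulation, but knows its global ID
    assert sim.get_global_id() == 4
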
+ Constructor Parameters ---------- @@ -113,6 +113,10 @@ def __init__(self, config_file: str) -> None: self._is_adaptivity_required_in_every_implicit_iteration = self._config.is_adaptivity_required_in_every_implicit_iteration() self._micro_sims_active_steps = None + # ************** + # Public methods + # ************** + def initialize(self) -> None: """ This function does the following things: @@ -250,12 +254,126 @@ def initialize(self) -> None: # Write initial data if required if self._interface.is_action_required(precice.action_write_initial_data()): - self.write_data_to_precice(micro_sims_output) + self._write_data_to_precice(micro_sims_output) self._interface.mark_action_fulfilled(precice.action_write_initial_data()) self._interface.initialize_data() - def read_data_from_precice(self) -> list: + def solve(self): + """ + This function handles the coupling time loop, including checkpointing and output. + """ + t, n = 0, 0 + t_checkpoint, n_checkpoint = 0, 0 + + if self._is_adaptivity_on: + similarity_dists = np.zeros( + (self._number_of_micro_sims_for_adaptivity, + self._number_of_micro_sims_for_adaptivity)) + + # Start adaptivity calculation with all sims inactive + is_sim_active = np.array([False] * self._number_of_micro_sims_for_adaptivity) + + # Activate the first one (a random choice) + is_sim_active[0] = True + + # Associate all sims to the one active sim + sim_is_associated_to = np.zeros((self._number_of_micro_sims_for_adaptivity), dtype=np.intc) + sim_is_associated_to[0] = -2 # An active sim does not have an associated sim + + similarity_dists_cp = None + is_sim_active_cp = None + sim_is_associated_to_cp = None + sim_states_cp = [None] * self._local_number_of_sims + + while self._interface.is_coupling_ongoing(): + if self._interface.is_action_required(precice.action_write_iteration_checkpoint()): + for i in range(self._local_number_of_sims): + sim_states_cp[i] = self._micro_sims[i].get_state() + t_checkpoint = t + n_checkpoint = n + + if self._is_adaptivity_on: + if not self._is_adaptivity_required_in_every_implicit_iteration: + similarity_dists, is_sim_active, sim_is_associated_to = self._adaptivity_controller.compute_adaptivity( + self._dt, self._micro_sims, similarity_dists, is_sim_active, sim_is_associated_to, self._data_for_adaptivity) + + # Only do checkpointing if adaptivity is computed once in every time window + similarity_dists_cp = np.copy(similarity_dists) + is_sim_active_cp = np.copy(is_sim_active) + sim_is_associated_to_cp = np.copy(sim_is_associated_to) + + if self._adaptivity_type == "local": + active_sim_ids = np.where(is_sim_active)[0] + elif self._adaptivity_type == "global": + active_sim_ids = np.where( + is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1])[0] + + for active_id in active_sim_ids: + self._micro_sims_active_steps[active_id] += 1 + + self._interface.mark_action_fulfilled( + precice.action_write_iteration_checkpoint()) + + micro_sims_input = self._read_data_from_precice() + + if self._is_adaptivity_on: + if self._is_adaptivity_required_in_every_implicit_iteration: + similarity_dists, is_sim_active, sim_is_associated_to = self._adaptivity_controller.compute_adaptivity( + self._dt, self._micro_sims, similarity_dists, is_sim_active, sim_is_associated_to, self._data_for_adaptivity) + + if self._adaptivity_type == "local": + active_sim_ids = np.where(is_sim_active)[0] + elif self._adaptivity_type == "global": + active_sim_ids = np.where( + 
is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1])[0] + + for active_id in active_sim_ids: + self._micro_sims_active_steps[active_id] += 1 + + micro_sims_output = self._solve_micro_simulations_with_adaptivity( + micro_sims_input, is_sim_active, sim_is_associated_to) + else: + micro_sims_output = self._solve_micro_simulations(micro_sims_input) + + self._write_data_to_precice(micro_sims_output) + + self._dt = self._interface.advance(self._dt) + + t += self._dt + n += 1 + + # Revert all micro simulations to checkpoints if required + if self._interface.is_action_required(precice.action_read_iteration_checkpoint()): + for i in range(self._local_number_of_sims): + self._micro_sims[i].set_state(sim_states_cp[i]) + n = n_checkpoint + t = t_checkpoint + + if self._is_adaptivity_on: + if not self._is_adaptivity_required_in_every_implicit_iteration: + similarity_dists = np.copy(similarity_dists_cp) + is_sim_active = np.copy(is_sim_active_cp) + sim_is_associated_to = np.copy(sim_is_associated_to_cp) + + self._interface.mark_action_fulfilled( + precice.action_read_iteration_checkpoint()) + else: # Time window has converged, now micro output can be generated + self._logger.info("Micro simulations {} - {} have converged at t = {}".format( + self._micro_sims[0].get_global_id(), self._micro_sims[-1].get_global_id(), t)) + + if self._micro_sims_have_output: + if n % self._micro_n_out == 0: + for micro_sim in self._micro_sims: + micro_sim.output() + + self._interface.finalize() + + # *************** + # Private methods + # *************** + + def _read_data_from_precice(self) -> list: """ Read data from preCICE. Depending on initial definition of whether a data is scalar or vector, the appropriate preCICE API command is called. @@ -283,7 +401,7 @@ def read_data_from_precice(self) -> list: return [dict(zip(read_data, t)) for t in zip(*read_data.values())] - def write_data_to_precice(self, micro_sims_output: list) -> None: + def _write_data_to_precice(self, micro_sims_output: list) -> None: """ Write output of micro simulations to preCICE. @@ -317,7 +435,7 @@ def write_data_to_precice(self, micro_sims_output: list) -> None: self._interface.write_block_scalar_data( self._write_data_ids[dname], [], np.array([])) - def solve_micro_simulations(self, micro_sims_input: list) -> list: + def _solve_micro_simulations(self, micro_sims_input: list) -> list: """ Solve all micro simulations using the data read from preCICE and assemble the micro simulations outputs in a list of dicts format. @@ -346,7 +464,7 @@ def solve_micro_simulations(self, micro_sims_input: list) -> list: return micro_sims_output - def solve_micro_simulations_with_adaptivity( + def _solve_micro_simulations_with_adaptivity( self, micro_sims_input: list, is_sim_active: np.ndarray, @@ -418,116 +536,6 @@ def solve_micro_simulations_with_adaptivity( return micro_sims_output - def solve(self): - """ - This function handles the coupling time loop, including checkpointing and output. 
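The checkpointing in the coupling loop relies on every micro simulation exposing get_state and set_state, as in the adjusted micro_dummy above. A minimal sketch of the save/restore cycle, using an illustrative user class:

    class MicroSimulation:  # illustrative
        def __init__(self):
            self._value = 0.0

        def solve(self, macro_data, dt):
            self._value += dt
            return {"micro-scalar-data": self._value}

        def get_state(self):
            return [self._value]

        def set_state(self, state):
            self._value = state[0]

    sim = MicroSimulation()
    state_cp = sim.get_state()      # on action_write_iteration_checkpoint
    sim.solve({}, dt=0.1)
    sim.set_state(state_cp)         # on action_read_iteration_checkpoint
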
- """ - t, n = 0, 0 - t_checkpoint, n_checkpoint = 0, 0 - - if self._is_adaptivity_on: - similarity_dists = np.zeros( - (self._number_of_micro_sims_for_adaptivity, - self._number_of_micro_sims_for_adaptivity)) - - # Start adaptivity calculation with all sims inactive - is_sim_active = np.array([False] * self._number_of_micro_sims_for_adaptivity) - - # Activate the first one (a random choice) - is_sim_active[0] = True - - # Associate all sims to the one active sim - sim_is_associated_to = np.zeros((self._number_of_micro_sims_for_adaptivity), dtype=np.intc) - sim_is_associated_to[0] = -2 # An active sim does not have an associated sim - - similarity_dists_cp = None - is_sim_active_cp = None - sim_is_associated_to_cp = None - sim_states_cp = [None] * self._local_number_of_sims - - while self._interface.is_coupling_ongoing(): - if self._interface.is_action_required(precice.action_write_iteration_checkpoint()): - for i in range(self._local_number_of_sims): - sim_states_cp[i] = self._micro_sims[i].get_state() - t_checkpoint = t - n_checkpoint = n - - if self._is_adaptivity_on: - if not self._is_adaptivity_required_in_every_implicit_iteration: - similarity_dists, is_sim_active, sim_is_associated_to = self._adaptivity_controller.compute_adaptivity( - self._dt, self._micro_sims, similarity_dists, is_sim_active, sim_is_associated_to, self._data_for_adaptivity) - - # Only do checkpointing if adaptivity is computed once in every time window - similarity_dists_cp = np.copy(similarity_dists) - is_sim_active_cp = np.copy(is_sim_active) - sim_is_associated_to_cp = np.copy(sim_is_associated_to) - - if self._adaptivity_type == "local": - active_sim_ids = np.where(is_sim_active)[0] - elif self._adaptivity_type == "global": - active_sim_ids = np.where( - is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1])[0] - - for active_id in active_sim_ids: - self._micro_sims_active_steps[active_id] += 1 - - self._interface.mark_action_fulfilled( - precice.action_write_iteration_checkpoint()) - - micro_sims_input = self.read_data_from_precice() - - if self._is_adaptivity_on: - if self._is_adaptivity_required_in_every_implicit_iteration: - similarity_dists, is_sim_active, sim_is_associated_to = self._adaptivity_controller.compute_adaptivity( - self._dt, self._micro_sims, similarity_dists, is_sim_active, sim_is_associated_to, self._data_for_adaptivity) - - if self._adaptivity_type == "local": - active_sim_ids = np.where(is_sim_active)[0] - elif self._adaptivity_type == "global": - active_sim_ids = np.where( - is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1])[0] - - for active_id in active_sim_ids: - self._micro_sims_active_steps[active_id] += 1 - - micro_sims_output = self.solve_micro_simulations_with_adaptivity( - micro_sims_input, is_sim_active, sim_is_associated_to) - else: - micro_sims_output = self.solve_micro_simulations(micro_sims_input) - - self.write_data_to_precice(micro_sims_output) - - self._dt = self._interface.advance(self._dt) - - t += self._dt - n += 1 - - # Revert all micro simulations to checkpoints if required - if self._interface.is_action_required(precice.action_read_iteration_checkpoint()): - for i in range(self._local_number_of_sims): - self._micro_sims[i].set_state(sim_states_cp[i]) - n = n_checkpoint - t = t_checkpoint - - if self._is_adaptivity_on: - if not self._is_adaptivity_required_in_every_implicit_iteration: - similarity_dists = np.copy(similarity_dists_cp) - is_sim_active = np.copy(is_sim_active_cp) - 
sim_is_associated_to = np.copy(sim_is_associated_to_cp) - - self._interface.mark_action_fulfilled( - precice.action_read_iteration_checkpoint()) - else: # Time window has converged, now micro output can be generated - self._logger.info("Micro simulations {} - {} have converged at t = {}".format( - self._micro_sims[0].get_global_id(), self._micro_sims[-1].get_global_id(), t)) - - if self._micro_sims_have_output: - if n % self._micro_n_out == 0: - for micro_sim in self._micro_sims: - micro_sim.output() - - self._interface.finalize() - def main(): parser = argparse.ArgumentParser(description='.') diff --git a/tests/unit/test_micro_manager.py b/tests/unit/test_micro_manager.py index e2dede53..32b79094 100644 --- a/tests/unit/test_micro_manager.py +++ b/tests/unit/test_micro_manager.py @@ -59,8 +59,8 @@ def test_initialize(self): def test_read_write_data_from_precice(self): manager = micro_manager.MicroManager('micro-manager-config.json') - manager.write_data_to_precice(self.fake_write_data) - read_data = manager.read_data_from_precice() + manager._write_data_to_precice(self.fake_write_data) + read_data = manager._read_data_from_precice() print("read_data: {}".format(read_data)) @@ -74,7 +74,7 @@ def test_solve_mico_sims(self): manager._local_number_of_sims = 4 manager._micro_sims = [MicroSimulation() for _ in range(4)] manager._micro_sims_active_steps = np.zeros(4, dtype=np.int32) - micro_sims_output = manager.solve_micro_simulations(self.fake_read_data) + micro_sims_output = manager._solve_micro_simulations(self.fake_read_data) for data, fake_data in zip(micro_sims_output, self.fake_write_data): self.assertEqual(data["micro-scalar-data"], 2) self.assertListEqual(data["micro-vector-data"].tolist(), From a0d31db4ea88ab435ead629f3bc19e39bb499f30 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 7 Jul 2023 21:42:22 +0200 Subject: [PATCH 65/87] Adding incomplete and missing documentation --- micro_manager/adaptivity/adaptivity.py | 36 +++++++--- micro_manager/adaptivity/global_adaptivity.py | 69 +++++++++++++++---- micro_manager/adaptivity/local_adaptivity.py | 42 +++++++---- micro_manager/config.py | 35 ++++++++-- micro_manager/micro_manager.py | 67 ++++++++++-------- 5 files changed, 179 insertions(+), 70 deletions(-) diff --git a/micro_manager/adaptivity/adaptivity.py b/micro_manager/adaptivity/adaptivity.py index d9e4edd9..44e2e2ee 100644 --- a/micro_manager/adaptivity/adaptivity.py +++ b/micro_manager/adaptivity/adaptivity.py @@ -9,7 +9,15 @@ class AdaptivityCalculator: def __init__(self, configurator, logger) -> None: - # Names of data to be used for adaptivity computation + """ + Class constructor. + + Parameters + ---------- + configurator : object of class Config + Object which has getter functions to get parameters defined in the configuration file. + logger : Logger defined from the standard package logging + """ self._refine_const = configurator.get_adaptivity_refining_const() self._coarse_const = configurator.get_adaptivity_coarsening_const() self._hist_param = configurator.get_adaptivity_hist_param() @@ -134,16 +142,21 @@ def _check_for_activation( similarity_dists: np.ndarray, is_sim_active: np.ndarray) -> bool: """ - Function to check if an inactive simulation needs to be activated + Check if an inactive simulation needs to be activated. Parameters ---------- inactive_id : int - ID of inactive simulation which is checked for activation + ID of inactive simulation which is checked for activation. 
similarity_dists : numpy array - 2D array having similarity distances between each micro simulation pair + 2D array having similarity distances between each micro simulation pair. is_sim_active : numpy array - 1D array having state (active or inactive) of each micro simulation + 1D array having state (active or inactive) of each micro simulation. + + Return + ------ + tag : bool + True if the inactive simulation needs to be activated, False otherwise. """ active_sim_ids = np.where(is_sim_active)[0] @@ -158,16 +171,21 @@ def _check_for_deactivation( similarity_dists: np.ndarray, is_sim_active: np.ndarray) -> bool: """ - Function to check if an active simulation needs to be deactivated + Check if an active simulation needs to be deactivated. Parameters ---------- active_id : int - ID of active simulation which is checked for deactivation + ID of active simulation which is checked for deactivation. similarity_dists : numpy array - 2D array having similarity distances between each micro simulation pair + 2D array having similarity distances between each micro simulation pair. is_sim_active : numpy array - 1D array having state (active or inactive) of each micro simulation + 1D array having state (active or inactive) of each micro simulation. + + Return + ------ + tag : bool + True if the active simulation needs to be deactivated, False otherwise. """ active_sim_ids = np.where(is_sim_active)[0] diff --git a/micro_manager/adaptivity/global_adaptivity.py b/micro_manager/adaptivity/global_adaptivity.py index cc39a63a..4c025eca 100644 --- a/micro_manager/adaptivity/global_adaptivity.py +++ b/micro_manager/adaptivity/global_adaptivity.py @@ -1,5 +1,9 @@ """ -Functionality for adaptive control of micro simulations in a global way (all-to-all comparison of micro simulations) +Class GlobalAdaptivityCalculator provides methods to adaptively control of micro simulations +in a global way. If the Micro Manager is run in parallel, an all-to-all comparison of simulations +on each rank is done. + +Note: All ID variables used in the methods of this class are global IDs, unless they have *local* in their name. """ import numpy as np import hashlib @@ -10,11 +14,6 @@ class GlobalAdaptivityCalculator(AdaptivityCalculator): - """ - This class provides functionality to compute adaptivity globally, i.e. by comparing micro simulation from all processes. - All ID variables used in the methods of this class are global IDs, unless they have *local* in their name. - """ - def __init__( self, configurator, @@ -24,6 +23,26 @@ def __init__( global_ids: list, comm, rank: int) -> None: + """ + Class constructor. + + Parameters + ---------- + configurator : object of class Config + Object which has getter functions to get parameters defined in the configuration file. + logger : object of logging + Logger defined from the standard package logging + is_sim_on_this_rank : list + List of booleans. True if simulation is on this rank, False otherwise. + rank_of_sim : numpy array + 1D array consisting of rank on which the simulation lives. + global_ids : list + List of global IDs of simulations living on this rank. + comm : MPI.COMM_WORLD + Global communicator of MPI. + rank : int + MPI rank. 
+ """ super().__init__(configurator, logger) self._is_sim_on_this_rank = is_sim_on_this_rank self._rank_of_sim = rank_of_sim @@ -45,7 +64,7 @@ def compute_adaptivity( Parameters ---------- dt : float - TODO + Current time step of the macro-micro coupled problem micro_sims : list List of objects of class MicroProblem, which are the micro simulations similarity_dists_nm1 : numpy array @@ -96,7 +115,8 @@ def communicate_micro_output( sim_is_associated_to: np.ndarray, micro_output: list) -> None: """ - Communicate micro output from active simulation to their associated inactive simulations. P2P communication is done. + Communicate micro output from active simulation to their associated inactive simulations. + Process to process (p2p) communication is done. Parameters ---------- @@ -181,8 +201,6 @@ def _update_inactive_sims( if self._is_sim_on_this_rank[i]: to_be_activated_ids.append(i) - print("is_sim_active: {}, to_be_activated: {}".format(_is_sim_active, to_be_activated_ids)) - local_sim_is_associated_to = _sim_is_associated_to[self._global_ids[0]:self._global_ids[-1] + 1] # Keys are global IDs of active sims not on this rank, values are lists of local and @@ -221,7 +239,24 @@ def _update_inactive_sims( return _is_sim_active, _sim_is_associated_to - def _create_tag(self, sim_id, src_rank, dest_rank): + def _create_tag(self, sim_id: int, src_rank: int, dest_rank: int) -> int: + """ + For a given simulations ID, source rank, and destination rank, a unique tag is created. + + Parameters + ---------- + sim_id : int + Global ID of a simulation. + src_rank : int + Rank on which the simulation lives + dest_rank : int + Rank to which data of a simulation is to be sent to. + + Returns + ------- + tag : int + Unique tag. + """ send_hashtag = hashlib.sha256() send_hashtag.update((str(src_rank) + str(sim_id) + str(dest_rank)).encode('utf-8')) tag = int(send_hashtag.hexdigest()[:6], base=16) @@ -229,12 +264,20 @@ def _create_tag(self, sim_id, src_rank, dest_rank): def _p2p_comm(self, assoc_active_ids: list, data: list) -> list: """ - This function created sending and receiving maps for p2p communication. + Handle process to process communication for a given set of associated active IDs and data. Parameters ---------- assoc_active_ids : list - Global IDs of active simulations which are not on this rank and are associated to the inactive simulations on this rank + Global IDs of active simulations which are not on this rank and are associated to + the inactive simulations on this rank. + data : list + Complete data from which parts are to be sent and received. + + Returns + ------- + recv_reqs : list + List of MPI requests of receive operations. """ send_map_local: Dict[int, int] = dict() # keys are global IDs, values are rank to send to send_map: Dict[int, list] = dict() # keys are global IDs of sims to send, values are ranks to send the sims to diff --git a/micro_manager/adaptivity/local_adaptivity.py b/micro_manager/adaptivity/local_adaptivity.py index e38baa67..2f82c877 100644 --- a/micro_manager/adaptivity/local_adaptivity.py +++ b/micro_manager/adaptivity/local_adaptivity.py @@ -1,5 +1,7 @@ """ -Functionality for adaptive control of micro simulations locally within a rank (or the entire domain if the Micro Manager is run in serial) +Class LocalAdaptivityCalculator provides methods to adaptively control of micro simulations +in a local way. If the Micro Manager is run in parallel, simulations on one rank are compared to +each other. A global comparison is not done. 
""" import numpy as np from .adaptivity import AdaptivityCalculator @@ -7,6 +9,16 @@ class LocalAdaptivityCalculator(AdaptivityCalculator): def __init__(self, configurator, logger) -> None: + """ + Class constructor. + + Parameters + ---------- + configurator : object of class Config + Object which has getter functions to get parameters defined in the configuration file. + logger : object of logging + Logger defined from the standard package logging + """ super().__init__(configurator, logger) def compute_adaptivity( @@ -18,29 +30,30 @@ def compute_adaptivity( sim_is_associated_to_nm1: np.ndarray, data_for_adaptivity: dict): """ - Compute adaptivity locally (within a rank) based on similarity distances and micro simulation states + Compute adaptivity locally (within a rank). Parameters ---------- dt : float Current time step micro_sims : list - TODO + List containing simulation objects similarity_dists_nm1 : numpy array - 2D array having similarity distances between each micro simulation pair + 2D array having similarity distances between each micro simulation pair. is_sim_active_nm1 : numpy array - 1D array having True if sim is active, False if sim is inactive + 1D array having True if sim is active, False if sim is inactive. sim_is_associated_to_nm1 : numpy array - 1D array with values of associated simulations of inactive simulations. Active simulations have None + 1D array with values of associated simulations of inactive simulations. Active simulations have None. data_for_adaptivity : dict - TODO + A dictionary containing the names of the data to be used in adaptivity as keys and information on whether + the data are scalar or vector as values. - Results + Returns ------- similarity_dists : numpy array - 2D array having similarity distances between each micro simulation pair + 2D array having similarity distances between each micro simulation pair. is_sim_active : numpy array - 1D array, True is sim is active, False if sim is inactive + 1D array, True is sim is active, False if sim is inactive. """ similarity_dists = self._get_similarity_dists(dt, similarity_dists_nm1, data_for_adaptivity) @@ -68,16 +81,17 @@ def _update_inactive_sims( """ Update set of inactive micro simulations. Each inactive micro simulation is compared to all active ones and if it is not similar to any of them, it is activated. + Parameters ---------- similarity_dists : numpy array - 2D array having similarity distances between each micro simulation pair + 2D array having similarity distances between each micro simulation pair. is_sim_active : numpy array - 1D array having state (active or inactive) of each micro simulation + 1D array having state (active or inactive) of each micro simulation. sim_is_associated_to : numpy array - 1D array with values of associated simulations of inactive simulations. Active simulations have None + 1D array with values of associated simulations of inactive simulations. Active simulations have None. micro_sims : list - TODO + List containing micro simulation objects. Returns ------- diff --git a/micro_manager/config.py b/micro_manager/config.py index 44d8b35f..b3c89e19 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -4,7 +4,6 @@ import json import os -import sys class Config: @@ -276,70 +275,94 @@ def write_micro_solve_time(self): def turn_on_adaptivity(self): """ + Boolean stating whether adaptivity is ot or not. Returns ------- + adaptivity : bool + True is adaptivity settings are done, False otherwise. 
""" return self._adaptivity def get_adaptivity_type(self): """ + String stating type of adaptivity computation, either "local" or "global". Returns ------- + adaptivity_type : str + Either "local" or "global" depending on the type of adaptivity computation """ return self._adaptivity_type def get_data_for_adaptivity(self): """ + Get names of data to be used for similarity distance calculation in adaptivity Returns ------- - + data_for_adaptivity : dict_like + A dictionary containing the names of the data to be used in adaptivity as keys and information on whether + the data are scalar or vector as values. """ return self._data_for_adaptivity def get_adaptivity_hist_param(self): """ + Get adaptivity history parameter. + More details: https://precice.org/tooling-micro-manager-configuration.html#adaptivity Returns ------- - + adaptivity_hist_param : float + Adaptivity history parameter """ return self._adaptivity_history_param def get_adaptivity_coarsening_const(self): """ + Get adaptivity coarsening constant. + More details: https://precice.org/tooling-micro-manager-configuration.html#adaptivity Returns ------- - + adaptivity_coarsening_constant : float + Adaptivity coarsening constant """ return self._adaptivity_coarsening_constant def get_adaptivity_refining_const(self): """ + Get adaptivity refining constant. + More details: https://precice.org/tooling-micro-manager-configuration.html#adaptivity Returns ------- - + adaptivity_refining_constant : float + Adaptivity refining constant """ return self._adaptivity_refining_constant def get_adaptivity_similarity_measure(self): """ + Get measure to be used to calculate similarity between pairs of simulations. + More details: https://precice.org/tooling-micro-manager-configuration.html#adaptivity Returns ------- - + adaptivity_similarity_measure : str + String of measure to be used in calculating similarity between pairs of simulations. """ return self._adaptivity_similarity_measure def is_adaptivity_required_in_every_implicit_iteration(self): """ + Check if adaptivity needs to be calculated in every time iteration or every time window. Returns ------- + adaptivity_every_implicit_iteration : bool + True if adaptivity needs to be calculated in every time iteration, False otherwise. """ return self._adaptivity_every_implicit_iteration diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index b97a0d90..33e77184 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -1,6 +1,15 @@ #!/usr/bin/env python3 """ -Micro Manager: a tool to initialize and adaptively control micro simulations and couple them via preCICE to a macro simulation +Micro Manager is a tool to initialize and adaptively control micro simulations and couple them via preCICE to a macro simulation. +This files the class MicroManager which has the following callable public methods: + +- initialize +- solve + +This file is directly executable as it consists of a main() function. Upon execution, an object of the class MicroManager is created using a given JSON file, +and the initialize and solve methods are called. + +Detailed documentation: https://precice.org/tooling-micro-manager-overview.html """ import argparse @@ -119,15 +128,14 @@ def __init__(self, config_file: str) -> None: def initialize(self) -> None: """ - This function does the following things: + Initialize the Micro Manager. 
- Decomposes the domain if the Micro Manager is executed in parallel - Initializes preCICE - Gets the macro mesh information from preCICE - Creates all micro simulation objects and initializes them if an initialization procedure is available - Writes initial data to preCICE """ - # Decompose the macro-domain and set the mesh access region for each - # partition in preCICE + # Decompose the macro-domain and set the mesh access region for each partition in preCICE assert len(self._macro_bounds) / \ 2 == self._interface.get_dimensions(), "Provided macro mesh bounds are of incorrect dimension" if self._is_parallel: @@ -138,7 +146,7 @@ def initialize(self) -> None: self._interface.set_mesh_access_region(self._macro_mesh_id, coupling_mesh_bounds) - # initialize preCICE + # Initialize preCICE self._dt = self._interface.initialize() self._mesh_vertex_ids, mesh_vertex_coords = self._interface.get_mesh_vertices_and_ids(self._macro_mesh_id) @@ -259,9 +267,12 @@ def initialize(self) -> None: self._interface.initialize_data() - def solve(self): + def solve(self) -> None: """ - This function handles the coupling time loop, including checkpointing and output. + Solve the problem using preCICE. + - Handle checkpointing is implicit coupling is done. + - Read data from preCICE, solve micro simulations, and write data to preCICE + - If adaptivity is on, compute micro simulations adaptively. """ t, n = 0, 0 t_checkpoint, n_checkpoint = 0, 0 @@ -287,6 +298,7 @@ def solve(self): sim_states_cp = [None] * self._local_number_of_sims while self._interface.is_coupling_ongoing(): + # Write a checkpoint if self._interface.is_action_required(precice.action_write_iteration_checkpoint()): for i in range(self._local_number_of_sims): sim_states_cp[i] = self._micro_sims[i].get_state() @@ -298,7 +310,8 @@ def solve(self): similarity_dists, is_sim_active, sim_is_associated_to = self._adaptivity_controller.compute_adaptivity( self._dt, self._micro_sims, similarity_dists, is_sim_active, sim_is_associated_to, self._data_for_adaptivity) - # Only do checkpointing if adaptivity is computed once in every time window + # Only checkpoint the adaptivity configuration if adaptivity is computed + # once in every time window similarity_dists_cp = np.copy(similarity_dists) is_sim_active_cp = np.copy(is_sim_active) sim_is_associated_to_cp = np.copy(sim_is_associated_to) @@ -343,13 +356,14 @@ def solve(self): t += self._dt n += 1 - # Revert all micro simulations to checkpoints if required + # Revert micro simulations to their last checkpoints if required if self._interface.is_action_required(precice.action_read_iteration_checkpoint()): for i in range(self._local_number_of_sims): self._micro_sims[i].set_state(sim_states_cp[i]) n = n_checkpoint t = t_checkpoint + # If adaptivity is computed only once per time window, the states of sims need to be reset too if self._is_adaptivity_on: if not self._is_adaptivity_required_in_every_implicit_iteration: similarity_dists = np.copy(similarity_dists_cp) @@ -364,8 +378,8 @@ def solve(self): if self._micro_sims_have_output: if n % self._micro_n_out == 0: - for micro_sim in self._micro_sims: - micro_sim.output() + for sim in self._micro_sims: + sim.output() self._interface.finalize() @@ -375,8 +389,7 @@ def solve(self): def _read_data_from_precice(self) -> list: """ - Read data from preCICE. Depending on initial definition of whether a data is scalar or vector, the appropriate - preCICE API command is called. + Read data from preCICE. 
Returns ------- @@ -401,31 +414,31 @@ def _read_data_from_precice(self) -> list: return [dict(zip(read_data, t)) for t in zip(*read_data.values())] - def _write_data_to_precice(self, micro_sims_output: list) -> None: + def _write_data_to_precice(self, data: list) -> None: """ - Write output of micro simulations to preCICE. + Write data to preCICE. Parameters ---------- - micro_sims_output : list + data : list List of dicts in which keys are names of data and the values are the data to be written to preCICE. """ - write_data: Dict[str, list] = dict() + data_dict: Dict[str, list] = dict() if not self._is_rank_empty: - for name in micro_sims_output[0]: - write_data[name] = [] + for name in data[0]: + data_dict[name] = [] - for output_dict in micro_sims_output: + for output_dict in data: for name, values in output_dict.items(): - write_data[name].append(values) + data_dict[name].append(values) for dname, is_data_vector in self._write_data_names.items(): if is_data_vector: self._interface.write_block_vector_data( - self._write_data_ids[dname], self._mesh_vertex_ids, write_data[dname]) + self._write_data_ids[dname], self._mesh_vertex_ids, data_dict[dname]) else: self._interface.write_block_scalar_data( - self._write_data_ids[dname], self._mesh_vertex_ids, write_data[dname]) + self._write_data_ids[dname], self._mesh_vertex_ids, data_dict[dname]) else: for dname, is_data_vector in self._write_data_names.items(): if is_data_vector: @@ -437,8 +450,7 @@ def _write_data_to_precice(self, micro_sims_output: list) -> None: def _solve_micro_simulations(self, micro_sims_input: list) -> list: """ - Solve all micro simulations using the data read from preCICE and assemble the micro simulations outputs in a list of dicts - format. + Solve all micro simulations and assemble the micro simulations outputs in a list of dicts format. Parameters ---------- @@ -470,8 +482,7 @@ def _solve_micro_simulations_with_adaptivity( is_sim_active: np.ndarray, sim_is_associated_to: np.ndarray) -> list: """ - Solve all micro simulations using the data read from preCICE and assemble the micro simulations outputs in a list of dicts - format. + Solve all micro simulations and assemble the micro simulations outputs in a list of dicts format. 
Parameters ---------- @@ -529,7 +540,7 @@ def _solve_micro_simulations_with_adaptivity( if self._is_micro_solve_time_required: micro_sims_output[inactive_id]["micro_sim_time"] = 0 - # Collect micro sim output for adaptivity + # Collect micro sim output for adaptivity calculation for i in range(self._local_number_of_sims): for name in self._adaptivity_micro_data_names: self._data_for_adaptivity[name][i] = micro_sims_output[i][name] From b4fbbb9c6334dbc30129910264d640982cacec3b Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 14 Jul 2023 22:15:58 +0200 Subject: [PATCH 66/87] Split tests and add new parallel tests for global adaptivity --- .github/workflows/run-adaptivity-test.yml | 54 +++++-- ...ager-unit-tests.yml => run-unit-tests.yml} | 0 micro_manager/adaptivity/global_adaptivity.py | 19 ++- micro_manager/config.py | 2 +- micro_manager/domain_decomposition.py | 16 ++- micro_manager/micro_manager.py | 20 +-- tests/README.md | 10 ++ .../clean-test.sh | 0 ...ger-config-global-adaptivity-parallel.json | 0 ...icro-manager-config-global-adaptivity.json | 0 ...micro-manager-config-local-adaptivity.json | 0 .../micro-manager-config-parallel-1.json | 0 .../micro-manager-config-parallel-2.json | 0 .../micro_dummy.py | 0 .../precice-config.xml | 0 .../run_micro_manager.py | 0 .../unit_cube.py} | 0 tests/unit/test_adaptivity_parallel.py | 117 +++++++++++++++ ...daptivity.py => test_adaptivity_serial.py} | 136 ++++++------------ tests/unit/test_domain_decomposition.py | 10 +- tests/unit/test_micro_manager.py | 6 +- 21 files changed, 259 insertions(+), 131 deletions(-) rename .github/workflows/{run-micro-manager-unit-tests.yml => run-unit-tests.yml} (100%) create mode 100644 tests/README.md rename tests/integration/{test_unit_cube_dummy => test_unit_cube}/clean-test.sh (100%) rename tests/integration/{test_unit_cube_dummy => test_unit_cube}/micro-manager-config-global-adaptivity-parallel.json (100%) rename tests/integration/{test_unit_cube_dummy => test_unit_cube}/micro-manager-config-global-adaptivity.json (100%) rename tests/integration/{test_unit_cube_dummy => test_unit_cube}/micro-manager-config-local-adaptivity.json (100%) rename tests/integration/{test_unit_cube_dummy => test_unit_cube}/micro-manager-config-parallel-1.json (100%) rename tests/integration/{test_unit_cube_dummy => test_unit_cube}/micro-manager-config-parallel-2.json (100%) rename tests/integration/{test_unit_cube_dummy => test_unit_cube}/micro_dummy.py (100%) rename tests/integration/{test_unit_cube_dummy => test_unit_cube}/precice-config.xml (100%) rename tests/integration/{test_unit_cube_dummy => test_unit_cube}/run_micro_manager.py (100%) rename tests/integration/{test_unit_cube_dummy/unit_cube_macro.py => test_unit_cube/unit_cube.py} (100%) create mode 100644 tests/unit/test_adaptivity_parallel.py rename tests/unit/{test_adaptivity.py => test_adaptivity_serial.py} (76%) diff --git a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml index d0f2b5dd..832ba449 100644 --- a/.github/workflows/run-adaptivity-test.yml +++ b/.github/workflows/run-adaptivity-test.yml @@ -1,4 +1,4 @@ -name: Run tests for adaptivity +name: Test adaptivity on: push: branches: @@ -32,16 +32,16 @@ jobs: - name: Run integration test with local adaptivity timeout-minutes: 3 - working-directory: micro-manager/tests/integration/test_unit_cube_dummy - run: python3 unit_cube_macro.py & python3 run_micro_manager.py --config micro-manager-config-local-adaptivity.json + working-directory: 
micro-manager/tests/integration/test_unit_cube + run: python3 unit_cube.py & python3 run_micro_manager.py --config micro-manager-config-local-adaptivity.json - name: Run integration test serially with global adaptivity timeout-minutes: 3 - working-directory: micro-manager/tests/integration/test_unit_cube_dummy - run: python3 unit_cube_macro.py & python3 run_micro_manager.py --config micro-manager-config-global-adaptivity.json + working-directory: micro-manager/tests/integration/test_unit_cube + run: python3 unit_cube.py & python3 run_micro_manager.py --config micro-manager-config-global-adaptivity.json - adaptivity_unit_tests: - name: Run adaptivity unit tests + adaptivity_unit_tests_serial: + name: Run adaptivity unit tests (serial variants) runs-on: ubuntu-latest container: precice/precice steps: @@ -64,4 +64,42 @@ jobs: - name: Run unit tests working-directory: micro-manager/tests/unit - run: python3 -m unittest test_adaptivity.py + run: python3 -m unittest test_adaptivity_serial.py + + adaptivity_unit_tests_parallel: + name: Run adaptivity unit tests (parallel variants) + runs-on: ubuntu-latest + container: precice/precice + steps: + - name: Checkout Repository + uses: actions/checkout@v3 + with: + path: micro-manager + + - name: Install sudo for MPI + working-directory: micro-manager + run: | + apt-get -qq update + apt-get -qq install sudo + + - name: Use mpi4py + uses: mpi4py/setup-mpi@v1 + + - name: Add user precice + run: useradd -m -s /bin/bash precice + + - name: Install Dependencies + working-directory: micro-manager + run: | + apt-get -qq update + apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config + su -c "python -m pip install --upgrade pip" precice + su -c "pip install setuptools wheel twine" precice + + - name: Install Micro Manager + working-directory: micro-manager + run: su -c "pip3 install --user ." precice + + - name: Run unit tests + working-directory: micro-manager/tests/unit + run: su -c "mpiexec -n 2 python3 -m unittest test_adaptivity_parallel.py" precice diff --git a/.github/workflows/run-micro-manager-unit-tests.yml b/.github/workflows/run-unit-tests.yml similarity index 100% rename from .github/workflows/run-micro-manager-unit-tests.yml rename to .github/workflows/run-unit-tests.yml diff --git a/micro_manager/adaptivity/global_adaptivity.py b/micro_manager/adaptivity/global_adaptivity.py index 4c025eca..35be5995 100644 --- a/micro_manager/adaptivity/global_adaptivity.py +++ b/micro_manager/adaptivity/global_adaptivity.py @@ -21,8 +21,8 @@ def __init__( is_sim_on_this_rank: list, rank_of_sim: np.ndarray, global_ids: list, - comm, - rank: int) -> None: + rank: int, + comm) -> None: """ Class constructor. @@ -38,10 +38,10 @@ def __init__( 1D array consisting of rank on which the simulation lives. global_ids : list List of global IDs of simulations living on this rank. - comm : MPI.COMM_WORLD - Global communicator of MPI. rank : int MPI rank. + comm : MPI.COMM_WORLD + Global communicator of MPI. 
""" super().__init__(configurator, logger) self._is_sim_on_this_rank = is_sim_on_this_rank @@ -89,7 +89,6 @@ def compute_adaptivity( data_as_list = self._comm.allgather(data_for_adaptivity[name]) global_data_for_adaptivity[name] = np.concatenate((data_as_list[:]), axis=0) - # Similarity distance matrix is calculated globally on every rank similarity_dists = self._get_similarity_dists(dt, similarity_dists_nm1, global_data_for_adaptivity) is_sim_active = self._update_active_sims(similarity_dists, is_sim_active_nm1) @@ -125,7 +124,7 @@ def communicate_micro_output( is_sim_active : numpy array 1D array having state (active or inactive) of each micro simulation on this rank sim_is_associated_to : numpy array - 1D array with values of associated simulations of inactive simulations. Active simulations have None + 1D array with values of associated simulations of inactive simulations. Active simulations have -2 micro_output : list List of dicts having individual output of each simulation. Only the active simulation outputs are entered. """ @@ -140,7 +139,7 @@ def communicate_micro_output( for i in inactive_local_ids: assoc_active_id = local_sim_is_associated_to[i] - # Gather global IDs of associated active simulations not on this rank for communication + # Gather global IDs of associated active simulations not on this rank if not self._is_sim_on_this_rank[assoc_active_id]: if assoc_active_id in active_to_inactive_map: active_to_inactive_map[assoc_active_id].append(i) @@ -191,6 +190,7 @@ def _update_inactive_sims( _is_sim_active = np.copy(is_sim_active) # Input is_sim_active is not longer used after this point _sim_is_associated_to = np.copy(sim_is_associated_to) + _sim_is_associated_to_updated = np.copy(sim_is_associated_to) # Check inactive simulations for activation and collect IDs of those to be activated to_be_activated_ids = [] # Global IDs to be activated @@ -198,6 +198,7 @@ def _update_inactive_sims( if not _is_sim_active[i]: # if id is inactive if self._check_for_activation(i, similarity_dists, _is_sim_active): _is_sim_active[i] = True + _sim_is_associated_to_updated[i] = -2 # Active sim cannot have an associated sim if self._is_sim_on_this_rank[i]: to_be_activated_ids.append(i) @@ -216,7 +217,6 @@ def _update_inactive_sims( if self._is_sim_on_this_rank[assoc_active_id]: # Associated active simulation is on the same rank assoc_active_local_id = self._global_ids.index(assoc_active_id) micro_sims[to_be_activated_local_id].set_state(micro_sims[assoc_active_local_id].get_state()) - _sim_is_associated_to[i] = -2 # Active sim cannot have an associated sim else: # Associated active simulation is not on this rank if assoc_active_id in to_be_activated_map: to_be_activated_map[assoc_active_id].append(to_be_activated_local_id) @@ -235,9 +235,8 @@ def _update_inactive_sims( local_ids = to_be_activated_map[global_id] for local_id in local_ids: micro_sims[local_id].set_state(state) - _sim_is_associated_to[self._global_ids[local_id]] = -2 # Active sim cannot have an associated sim - return _is_sim_active, _sim_is_associated_to + return _is_sim_active, _sim_is_associated_to_updated def _create_tag(self, sim_id: int, src_rank: int, dest_rank: int) -> int: """ diff --git a/micro_manager/config.py b/micro_manager/config.py index b3c89e19..6dcf9a1f 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -1,5 +1,5 @@ """ -Configuration module of the Micro Manager +Class Config provides functionality to read a JSON file and pass the values to the Micro Manager. 
""" import json diff --git a/micro_manager/domain_decomposition.py b/micro_manager/domain_decomposition.py index c1dd61e2..26f48979 100644 --- a/micro_manager/domain_decomposition.py +++ b/micro_manager/domain_decomposition.py @@ -1,5 +1,5 @@ """ -Functionality to partition the macro domain according to partitions in each axis provided by the user +Class DomainDecomposer provides the method decompose_macro_domain which returns partitioned bounds """ import numpy as np @@ -7,6 +7,20 @@ class DomainDecomposer: def __init__(self, logger, dims, rank, size) -> None: + """ + Class constructor. + + Parameters + ---------- + logger : object of logging + Logger defined from the standard package logging. + dims : int + Dimensions of the problem. + rank : int + MPI rank. + size : int + Total number of MPI processes. + """ self._logger = logger self._rank = rank self._size = size diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 33e77184..e4eb9cd9 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -35,12 +35,12 @@ class MicroManager: def __init__(self, config_file: str) -> None: """ - Constructor + Constructor. Parameters ---------- config_file : string - Name of the JSON configuration file (to be provided by the user) + Name of the JSON configuration file (provided by the user). """ self._comm = MPI.COMM_WORLD self._rank = self._comm.Get_rank() @@ -128,12 +128,12 @@ def __init__(self, config_file: str) -> None: def initialize(self) -> None: """ - Initialize the Micro Manager. - - Decomposes the domain if the Micro Manager is executed in parallel - - Initializes preCICE - - Gets the macro mesh information from preCICE - - Creates all micro simulation objects and initializes them if an initialization procedure is available - - Writes initial data to preCICE + Initialize the Micro Manager by performing the following tasks: + - Decompose the domain if the Micro Manager is executed in parallel. + - Initialize preCICE. + - Gets the macro mesh information from preCICE. + - Create all micro simulation objects and initialize them if an initialize() method is available. + - If required, write initial data to preCICE. """ # Decompose the macro-domain and set the mesh access region for each partition in preCICE assert len(self._macro_bounds) / \ @@ -225,8 +225,8 @@ def initialize(self) -> None: self._is_sim_on_this_rank, self._rank_of_sim, self._global_ids_of_local_sims, - self._comm, - self._rank) + self._rank, + self._comm) self._number_of_micro_sims_for_adaptivity = self._global_number_of_micro_sims self._micro_sims_active_steps = np.zeros(self._local_number_of_sims) diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 00000000..8c6fd945 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,10 @@ +# Tests + +This folder contains everything needed for testing. The tests are split into two categories: + +* `unit` contains unit tests that only check independent functions and modules. Interaction with preCICE is not required. Therefore no mocking should be performed. +* `integration` contains integration tests that interact with preCICE. + +## Programming Guidelines + +Make sure to **only** use `tests/MockedPrecice.py` and `@patch.dict('sys.modules', **{'precice': MockedPrecice})` in `integration` and not in `unit`. If during the development of a test mocking becomes necessary or the mocked up version of preCICE is not used you might have to reconsider the design or where the test is located. 
diff --git a/tests/integration/test_unit_cube_dummy/clean-test.sh b/tests/integration/test_unit_cube/clean-test.sh similarity index 100% rename from tests/integration/test_unit_cube_dummy/clean-test.sh rename to tests/integration/test_unit_cube/clean-test.sh diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity-parallel.json b/tests/integration/test_unit_cube/micro-manager-config-global-adaptivity-parallel.json similarity index 100% rename from tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity-parallel.json rename to tests/integration/test_unit_cube/micro-manager-config-global-adaptivity-parallel.json diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity.json b/tests/integration/test_unit_cube/micro-manager-config-global-adaptivity.json similarity index 100% rename from tests/integration/test_unit_cube_dummy/micro-manager-config-global-adaptivity.json rename to tests/integration/test_unit_cube/micro-manager-config-global-adaptivity.json diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-local-adaptivity.json b/tests/integration/test_unit_cube/micro-manager-config-local-adaptivity.json similarity index 100% rename from tests/integration/test_unit_cube_dummy/micro-manager-config-local-adaptivity.json rename to tests/integration/test_unit_cube/micro-manager-config-local-adaptivity.json diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json b/tests/integration/test_unit_cube/micro-manager-config-parallel-1.json similarity index 100% rename from tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-1.json rename to tests/integration/test_unit_cube/micro-manager-config-parallel-1.json diff --git a/tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json b/tests/integration/test_unit_cube/micro-manager-config-parallel-2.json similarity index 100% rename from tests/integration/test_unit_cube_dummy/micro-manager-config-parallel-2.json rename to tests/integration/test_unit_cube/micro-manager-config-parallel-2.json diff --git a/tests/integration/test_unit_cube_dummy/micro_dummy.py b/tests/integration/test_unit_cube/micro_dummy.py similarity index 100% rename from tests/integration/test_unit_cube_dummy/micro_dummy.py rename to tests/integration/test_unit_cube/micro_dummy.py diff --git a/tests/integration/test_unit_cube_dummy/precice-config.xml b/tests/integration/test_unit_cube/precice-config.xml similarity index 100% rename from tests/integration/test_unit_cube_dummy/precice-config.xml rename to tests/integration/test_unit_cube/precice-config.xml diff --git a/tests/integration/test_unit_cube_dummy/run_micro_manager.py b/tests/integration/test_unit_cube/run_micro_manager.py similarity index 100% rename from tests/integration/test_unit_cube_dummy/run_micro_manager.py rename to tests/integration/test_unit_cube/run_micro_manager.py diff --git a/tests/integration/test_unit_cube_dummy/unit_cube_macro.py b/tests/integration/test_unit_cube/unit_cube.py similarity index 100% rename from tests/integration/test_unit_cube_dummy/unit_cube_macro.py rename to tests/integration/test_unit_cube/unit_cube.py diff --git a/tests/unit/test_adaptivity_parallel.py b/tests/unit/test_adaptivity_parallel.py new file mode 100644 index 00000000..f237feb8 --- /dev/null +++ b/tests/unit/test_adaptivity_parallel.py @@ -0,0 +1,117 @@ +from unittest import TestCase +from unittest.mock import MagicMock +from 
micro_manager.adaptivity.global_adaptivity import GlobalAdaptivityCalculator +import numpy as np +from mpi4py import MPI + + +class TestGlobalAdaptivity(TestCase): + def setUp(self): + self._comm = MPI.COMM_WORLD + self._rank = self._comm.Get_rank() + self._size = self._comm.Get_size() + + def test_update_inactive_sims_global_adaptivity(self): + """ + Test functionality to update inactive simulations in a particular setting, for a global adaptivity setting. + Run this test in parallel using MPI with 2 ranks. + """ + if self._rank == 0: + is_sim_on_this_rank = [True, True, True, False, False] + global_ids = [0, 1, 2] + elif self._rank == 1: + is_sim_on_this_rank = [False, False, False, True, True] + global_ids = [3, 4] + + is_sim_active = np.array([False, False, True, True, False]) + rank_of_sim = [0, 0, 0, 1, 1] + sim_is_associated_to = [3, 3, -2, -2, 2] + expected_is_sim_active = np.array([True, False, True, True, True]) + expected_sim_is_associated_to = [-2, 3, -2, -2, -2] + + configurator = MagicMock() + configurator.get_adaptivity_similarity_measure = MagicMock(return_value="L1") + adaptivity_controller = GlobalAdaptivityCalculator( + configurator, + MagicMock(), + is_sim_on_this_rank, + rank_of_sim, + global_ids, + rank=self._rank, + comm=self._comm) + + # Force the activation of sim #0 and #4 + def check_for_activation(i, sim_dists, active): + if i == 0 or i == 4: + return True + else: + return False + + adaptivity_controller._check_for_activation = check_for_activation + + class MicroSimulation(): + def __init__(self, global_id) -> None: + self._global_id = global_id + self._state = [global_id] * 3 + + def get_global_id(self): + return self._global_id + + def set_state(self, state): + self._state = state + + def get_state(self): + return self._state.copy() + + dummy_micro_sims = [] + for i in global_ids: + dummy_micro_sims.append(MicroSimulation(i)) + + is_sim_active, sim_is_associated_to = adaptivity_controller._update_inactive_sims( + np.array([0]), is_sim_active, sim_is_associated_to, dummy_micro_sims) + + self.assertTrue(np.array_equal(expected_is_sim_active, is_sim_active)) + self.assertTrue(np.array_equal(expected_sim_is_associated_to, sim_is_associated_to)) + + if self._rank == 0: + self.assertTrue(np.array_equal([3, 3, 3], dummy_micro_sims[0].get_state())) + elif self._rank == 1: + self.assertTrue(np.array_equal([2, 2, 2], dummy_micro_sims[1].get_state())) + + def test_communicate_micro_output(self): + """ + Test functionality to communicate micro output from active sims to their associated inactive sims, for a global adaptivity setting. + Run this test in parallel using MPI with 2 ranks. 
+ """ + output_0 = {"data0.1": 1.0, "data0.2": [1.0, 2.0]} + output_1 = {"data1.1": 10.0, "data1.2": [10.0, 20.0]} + + if self._rank == 0: + is_sim_on_this_rank = [True, True, True, False, False] + global_ids = [0, 1, 2] + sim_output = [None, None, output_0] + expected_sim_output = [output_1, output_1, output_0] + elif self._rank == 1: + is_sim_on_this_rank = [False, False, False, True, True] + global_ids = [3, 4] + sim_output = [output_1, None] + expected_sim_output = [output_1, output_0] + + is_sim_active = np.array([False, False, True, True, False]) + rank_of_sim = [0, 0, 0, 1, 1] + sim_is_associated_to = [3, 3, -2, -2, 2] + + configurator = MagicMock() + configurator.get_adaptivity_similarity_measure = MagicMock(return_value="L1") + adaptivity_controller = GlobalAdaptivityCalculator( + configurator, + MagicMock(), + is_sim_on_this_rank, + rank_of_sim, + global_ids, + rank=self._rank, + comm=self._comm) + + adaptivity_controller.communicate_micro_output(is_sim_active, sim_is_associated_to, sim_output) + + self.assertTrue(np.array_equal(expected_sim_output, sim_output)) diff --git a/tests/unit/test_adaptivity.py b/tests/unit/test_adaptivity_serial.py similarity index 76% rename from tests/unit/test_adaptivity.py rename to tests/unit/test_adaptivity_serial.py index d91765bd..7334b203 100644 --- a/tests/unit/test_adaptivity.py +++ b/tests/unit/test_adaptivity_serial.py @@ -6,10 +6,10 @@ from micro_manager.config import Config import numpy as np from math import exp +from mpi4py import MPI class TestLocalAdaptivity(TestCase): - def setUp(self): self._number_of_sims = 5 self._dt = 0.1 @@ -60,7 +60,7 @@ def setUp(self): def test_get_similarity_dists(self): """ - Test base functionality of calculating the similarity distance matrix + Test functionality of calculating the similarity distance matrix in class AdaptivityCalculator. """ configurator = MagicMock() configurator.get_adaptivity_similarity_measure = MagicMock(return_value='L1') @@ -90,13 +90,13 @@ def test_get_similarity_dists(self): def test_update_active_sims(self): """ - Test base functionality of updating active simulations + Test functionality of updating active simulations in class AdaptivityCalculator. """ configurator = MagicMock() configurator.get_adaptivity_similarity_measure = MagicMock(return_value="L1") adaptivity_controller = AdaptivityCalculator(configurator, logger=MagicMock()) - adaptivity_controller._refine_const = 0.4 - adaptivity_controller._coarse_const = 0.3 + adaptivity_controller._refine_const = self._refine_const + adaptivity_controller._coarse_const = self._coarse_const adaptivity_controller._adaptivity_data_names = ["macro-scalar-data", "macro-vector-data"] # Third and fifth micro sim are active, rest are inactive @@ -116,12 +116,39 @@ class MicroSimulation(): self.assertTrue(np.array_equal(expected_is_sim_active, is_sim_active)) + def test_adaptivity_norms(self): + """ + Test functionality for calculating similarity criteria between pairs of simulations using different norms in class AdaptivityCalculator. 
+ """ + calc = AdaptivityCalculator(Config('micro-manager-config.json'), 0) + + fake_data = np.array([[1], [2], [3]]) + self.assertTrue(np.allclose(calc._l1(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) + # norm taken over last axis -> same as before + self.assertTrue(np.allclose(calc._l2(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) + self.assertTrue(np.allclose(calc._l1rel(fake_data), np.array( + [[0, 0.5, 2 / 3], [0.5, 0, 1 / 3], [2 / 3, 1 / 3, 0]]))) + self.assertTrue(np.allclose(calc._l2rel(fake_data), np.array( + [[0, 0.5, 2 / 3], [0.5, 0, 1 / 3], [2 / 3, 1 / 3, 0]]))) + + fake_2d_data = np.array([[1, 2], [3, 4]]) + self.assertTrue(np.allclose(calc._l1(fake_2d_data), np.array([[0, 4], [4, 0]]))) + self.assertTrue(np.allclose(calc._l2(fake_2d_data), np.array([[0, np.sqrt((1 - 3)**2 + (2 - 4)**2)], + [np.sqrt((1 - 3)**2 + (2 - 4)**2), 0]]))) + self.assertTrue(np.allclose(calc._l1rel(fake_2d_data), np.array( + [[0, abs((1 - 3) / max(1, 3) + (2 - 4) / max(2, 4))], [abs((1 - 3) / max(1, 3) + (2 - 4) / max(2, 4)), 0]]))) + self.assertTrue(np.allclose(calc._l2rel(fake_2d_data), np.array([[0, np.sqrt( + (1 - 3)**2 / max(1, 3)**2 + (2 - 4)**2 / max(2, 4)**2)], [np.sqrt((1 - 3)**2 / max(1, 3)**2 + (2 - 4)**2 / max(2, 4)**2), 0]]))) + def test_associate_active_to_inactive(self): + """ + Test functionality to associate inactive sims to active ones, in the class AdaptivityCalculator. + """ configurator = MagicMock() configurator.get_adaptivity_similarity_measure = MagicMock(return_value="L1") - adaptivity_controller = LocalAdaptivityCalculator(configurator, logger=MagicMock()) - adaptivity_controller._refine_const = 0.4 - adaptivity_controller._coarse_const = 0.3 + adaptivity_controller = AdaptivityCalculator(configurator, logger=MagicMock()) + adaptivity_controller._refine_const = self._refine_const + adaptivity_controller._coarse_const = self._coarse_const adaptivity_controller._adaptivity_data_names = ["macro-scalar-data", "macro-vector-data"] is_sim_active = np.array([True, False, False, True, False]) @@ -145,33 +172,15 @@ def get_global_id(self): self.assertTrue(np.array_equal(expected_sim_is_associated_to, sim_is_associated_to)) - def test_adaptivity_norms(self): - calc = AdaptivityCalculator(Config('micro-manager-config.json'), 0) - - fake_data = np.array([[1], [2], [3]]) - self.assertTrue(np.allclose(calc._l1(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) - # norm taken over last axis -> same as before - self.assertTrue(np.allclose(calc._l2(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) - self.assertTrue(np.allclose(calc._l1rel(fake_data), np.array( - [[0, 0.5, 2 / 3], [0.5, 0, 1 / 3], [2 / 3, 1 / 3, 0]]))) - self.assertTrue(np.allclose(calc._l2rel(fake_data), np.array( - [[0, 0.5, 2 / 3], [0.5, 0, 1 / 3], [2 / 3, 1 / 3, 0]]))) - - fake_2d_data = np.array([[1, 2], [3, 4]]) - self.assertTrue(np.allclose(calc._l1(fake_2d_data), np.array([[0, 4], [4, 0]]))) - self.assertTrue(np.allclose(calc._l2(fake_2d_data), np.array([[0, np.sqrt((1 - 3)**2 + (2 - 4)**2)], - [np.sqrt((1 - 3)**2 + (2 - 4)**2), 0]]))) - self.assertTrue(np.allclose(calc._l1rel(fake_2d_data), np.array( - [[0, abs((1 - 3) / max(1, 3) + (2 - 4) / max(2, 4))], [abs((1 - 3) / max(1, 3) + (2 - 4) / max(2, 4)), 0]]))) - self.assertTrue(np.allclose(calc._l2rel(fake_2d_data), np.array([[0, np.sqrt( - (1 - 3)**2 / max(1, 3)**2 + (2 - 4)**2 / max(2, 4)**2)], [np.sqrt((1 - 3)**2 / max(1, 3)**2 + (2 - 4)**2 / max(2, 4)**2), 0]]))) - def test_update_inactive_sims_local_adaptivity(self): + """ + 
Test functionality to update inactive simulations in a particular setting, for a local adaptivity setting. + """ configurator = MagicMock() configurator.get_adaptivity_similarity_measure = MagicMock(return_value="L1") adaptivity_controller = LocalAdaptivityCalculator(configurator, logger=MagicMock()) - adaptivity_controller._refine_const = 0.4 - adaptivity_controller._coarse_const = 0.3 + adaptivity_controller._refine_const = self._refine_const + adaptivity_controller._coarse_const = self._coarse_const adaptivity_controller._adaptivity_data_names = ["macro-scalar-data", "macro-vector-data"] # Third and fifth micro sim are active, rest are deactivate @@ -213,68 +222,3 @@ def get_state(self): self.assertTrue(np.array_equal(expected_is_sim_active, is_sim_active)) self.assertTrue(np.array_equal(expected_sim_is_associated_to, sim_is_associated_to)) - - def test_update_inactive_sims_global_adaptivity(self): - is_sim_on_this_rank = [False, False, False, True, True] # from the perspective of rank 0 - rank_of_sim = [0, 0, 0, 1, 1] - global_ids = [0, 1, 2, 3, 4] - configurator = MagicMock() - configurator.get_adaptivity_similarity_measure = MagicMock(return_value="L1") - adaptivity_controller = GlobalAdaptivityCalculator( - configurator, - MagicMock(), - is_sim_on_this_rank, - rank_of_sim, - global_ids, - comm=MagicMock(), - rank=1) - adaptivity_controller._refine_const = 0.4 - adaptivity_controller._coarse_const = 0.3 - - # Third and fifth micro sim are active, rest are deactivate - expected_is_sim_active = np.array([True, False, False, True, False]) - expected_sim_is_associated_to = np.array([-2, 0, 0, -2, 3]) - - is_sim_active = np.array([True, False, False, False, False]) - sim_is_associated_to = np.array([-2, 0, 0, 0, 3]) - - class MicroSimulation(): - def get_global_id(self): - return 1 - - def set_global_id(self, global_id): - pass - - def set_state(self, state): - pass - - def get_state(self): - pass - - class fake_MPI_request(): - def __init__(self, state, global_id) -> None: - self._state = state - self._global_id = global_id - - def wait(self): - return self._state, self._global_id - - fake_requests = [fake_MPI_request(None, 0)] - - adaptivity_controller._p2p_comm = MagicMock(return_value=fake_requests) - - dummy_micro_sims = [] - for i in range(self._number_of_sims): - dummy_micro_sims.append(MicroSimulation()) - - is_sim_active, sim_is_associated_to = adaptivity_controller._update_inactive_sims( - self._similarity_dists, is_sim_active, sim_is_associated_to, dummy_micro_sims) - - self.assertTrue(np.array_equal(expected_is_sim_active, is_sim_active)) - self.assertTrue(np.array_equal(expected_sim_is_associated_to, sim_is_associated_to)) - - def test_communicate_micro_output(self): - pass - - def test_p2p_comm(self): - pass diff --git a/tests/unit/test_domain_decomposition.py b/tests/unit/test_domain_decomposition.py index 04473394..9a813e5c 100644 --- a/tests/unit/test_domain_decomposition.py +++ b/tests/unit/test_domain_decomposition.py @@ -5,12 +5,14 @@ class TestDomainDecomposition(TestCase): - def setUp(self) -> None: self._logger = MagicMock() self._macro_bounds_3d = [-1, 1, -2, 2, -2, 8] def test_rank5_outof_10_3d(self): + """ + Test domain decomposition for rank 5 in a setting of axis-wise ranks: [1, 2, 5] + """ rank = 5 size = 10 ranks_per_axis = [1, 2, 5] @@ -21,6 +23,9 @@ def test_rank5_outof_10_3d(self): self.assertTrue(np.allclose(mesh_bounds, [-1, 1, 0, 2, 2, 4])) def test_rank10_out_of_32_3d(self): + """ + Test domain decomposition for rank 10 in a setting of axis-wise ranks: 
[4, 1, 8] + """ rank = 10 size = 32 ranks_per_axis = [4, 1, 8] @@ -31,6 +36,9 @@ def test_rank10_out_of_32_3d(self): self.assertTrue(np.allclose(mesh_bounds, [0, 0.5, -2, 2, 0.5, 1.75])) def test_rank7_out_of_16_3d(self): + """ + Test domain decomposition for rank 7 in a setting of axis-wise ranks: [8, 2, 1] + """ rank = 7 size = 16 ranks_per_axis = [8, 2, 1] diff --git a/tests/unit/test_micro_manager.py b/tests/unit/test_micro_manager.py index 32b79094..0199b92b 100644 --- a/tests/unit/test_micro_manager.py +++ b/tests/unit/test_micro_manager.py @@ -62,8 +62,6 @@ def test_read_write_data_from_precice(self): manager._write_data_to_precice(self.fake_write_data) read_data = manager._read_data_from_precice() - print("read_data: {}".format(read_data)) - for data, fake_data in zip(read_data, self.fake_read_data): self.assertEqual(data["macro-scalar-data"], 1) self.assertListEqual(data["macro-vector-data"].tolist(), @@ -74,7 +72,9 @@ def test_solve_mico_sims(self): manager._local_number_of_sims = 4 manager._micro_sims = [MicroSimulation() for _ in range(4)] manager._micro_sims_active_steps = np.zeros(4, dtype=np.int32) + micro_sims_output = manager._solve_micro_simulations(self.fake_read_data) + for data, fake_data in zip(micro_sims_output, self.fake_write_data): self.assertEqual(data["micro-scalar-data"], 2) self.assertListEqual(data["micro-vector-data"].tolist(), @@ -89,8 +89,6 @@ def test_config(self): self.assertEqual(config._micro_output_n, 10) self.assertDictEqual(config._read_data_names, self.fake_read_data_names) self.assertDictEqual(self.fake_write_data_names, config._write_data_names) - - # test adaptivity self.assertEqual(config._adaptivity, True) self.assertDictEqual(config._data_for_adaptivity, self.fake_read_data_names) self.assertEqual(config._adaptivity_type, "local") From 5c452e4ae85146fd3106fdcd1bfe7890198ad8dc Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 14 Jul 2023 22:32:22 +0200 Subject: [PATCH 67/87] Add get_state and set_state functions to documentation, and simplify test names --- .github/workflows/run-adaptivity-test.yml | 12 +++++------ .../run-domain-decomposition-tests.yml | 20 +++++++++---------- .github/workflows/run-macro-micro-dummy.yml | 4 ++-- docs/micro-simulation-convert-to-library.md | 17 ++++++++-------- micro_manager/domain_decomposition.py | 2 +- 5 files changed, 27 insertions(+), 28 deletions(-) diff --git a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml index 832ba449..b5ec9145 100644 --- a/.github/workflows/run-adaptivity-test.yml +++ b/.github/workflows/run-adaptivity-test.yml @@ -8,8 +8,8 @@ on: branches: - "*" jobs: - adaptivity_integration_test: - name: Run adaptivity integration test + integration_test: + name: Run integration test runs-on: ubuntu-latest container: precice/precice steps: @@ -40,8 +40,8 @@ jobs: working-directory: micro-manager/tests/integration/test_unit_cube run: python3 unit_cube.py & python3 run_micro_manager.py --config micro-manager-config-global-adaptivity.json - adaptivity_unit_tests_serial: - name: Run adaptivity unit tests (serial variants) + unit_tests_serial: + name: Run unit tests (serial variants) runs-on: ubuntu-latest container: precice/precice steps: @@ -66,8 +66,8 @@ jobs: working-directory: micro-manager/tests/unit run: python3 -m unittest test_adaptivity_serial.py - adaptivity_unit_tests_parallel: - name: Run adaptivity unit tests (parallel variants) + unit_tests_parallel: + name: Run unit tests (parallel variants) runs-on: ubuntu-latest container: precice/precice 
steps: diff --git a/.github/workflows/run-domain-decomposition-tests.yml b/.github/workflows/run-domain-decomposition-tests.yml index 73330d50..444ebff3 100644 --- a/.github/workflows/run-domain-decomposition-tests.yml +++ b/.github/workflows/run-domain-decomposition-tests.yml @@ -1,4 +1,4 @@ -name: Run tests for domain decomposition +name: Test domain decomposition on: push: branches: @@ -8,8 +8,8 @@ on: branches: - "*" jobs: - domain_decomposition_integration_test: - name: Run domain decomposition integration tests + integration_test: + name: Run integration tests runs-on: ubuntu-latest container: precice/precice steps: @@ -45,19 +45,19 @@ jobs: timeout-minutes: 3 working-directory: micro-manager/tests/integration run: | - chown -R precice test_unit_cube_dummy/ - cd test_unit_cube_dummy/ - su -c "mpiexec -n 2 python3 run_micro_manager.py --config micro-manager-config-parallel-1.json & python3 unit_cube_macro.py" precice + chown -R precice test_unit_cube/ + cd test_unit_cube/ + su -c "mpiexec -n 2 python3 run_micro_manager.py --config micro-manager-config-parallel-1.json & python3 unit_cube.py" precice - name: Run integration test (variant 2) timeout-minutes: 3 working-directory: micro-manager/tests/integration run: | - cd test_unit_cube_dummy/ - su -c "mpiexec -n 6 --oversubscribe python3 run_micro_manager.py --config micro-manager-config-parallel-2.json & python3 unit_cube_macro.py" precice + cd test_unit_cube/ + su -c "mpiexec -n 6 --oversubscribe python3 run_micro_manager.py --config micro-manager-config-parallel-2.json & python3 unit_cube.py" precice - domain_decomposition_unit_tests: - name: Run domain decomposition unit tests + unit_tests: + name: Run unit tests runs-on: ubuntu-latest container: precice/precice steps: diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml index 211b847c..2394f853 100644 --- a/.github/workflows/run-macro-micro-dummy.yml +++ b/.github/workflows/run-macro-micro-dummy.yml @@ -8,8 +8,8 @@ on: branches: - "*" jobs: - run_macro_micro_dummy: - name: Run macro-micro dummy + run_dummy: + name: Run dummy runs-on: ubuntu-latest container: precice/precice steps: diff --git a/docs/micro-simulation-convert-to-library.md b/docs/micro-simulation-convert-to-library.md index b582499a..8cafb9ec 100644 --- a/docs/micro-simulation-convert-to-library.md +++ b/docs/micro-simulation-convert-to-library.md @@ -29,32 +29,31 @@ class MicroSimulation: # Name is fixed Python dictionary with names of micro data as keys and the data as values at the initial condition """ - def solve(self, macro_data, dt) -> dict: + def solve(self, macro_data: dict, dt: float) -> dict: """ Solve one time step of the micro simulation for transient problems or solve until steady state for steady-state problems. Parameters ---------- macro_data : dict - Dictionary with names of macro data as keys and the data as values + Dictionary with names of macro data as keys and the data as values. dt : float - Time step size + Current time step size. Returns ------- micro_data : dict - Dictionary with names of micro data as keys and the updated micro data a values + Dictionary with names of micro data as keys and the updated micro data a values. """ - def save_checkpoint(self): + def set_state(self, state): """ - Save the state of the micro simulation. *Required for implicit coupling*. - Save the state internally. + Set the state of the micro simulation. 
""" - def reload_checkpoint(self): + def get_state(self): """ - Revert the micro simulation to a previously saved state. *Required for implicit coupling*. + Return the state of the micro simulation. """ def output(self): diff --git a/micro_manager/domain_decomposition.py b/micro_manager/domain_decomposition.py index 26f48979..91fd6bf8 100644 --- a/micro_manager/domain_decomposition.py +++ b/micro_manager/domain_decomposition.py @@ -54,7 +54,7 @@ def decompose_macro_domain(self, macro_bounds: list, ranks_per_axis: list) -> li for d in range(self._dims): dx.append(abs(macro_bounds[d * 2 + 1] - macro_bounds[d * 2]) / ranks_per_axis[d]) - rank_in_axis: list[int] = [None] * self._dims + rank_in_axis: list[int] = [0] * self._dims if ranks_per_axis[0] == 1: # if serial in x axis rank_in_axis[0] = 0 else: From 7af4879d02a8e9b4b1c68dbeb428a20d9fa1ed25 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 14 Jul 2023 23:17:18 +0200 Subject: [PATCH 68/87] Remove all traces of checkpoint --- examples/cpp-dummy/micro_cpp_dummy.cpp | 8 ++++---- examples/cpp-dummy/micro_cpp_dummy.hpp | 2 +- examples/python-dummy/micro_dummy.py | 8 ++++---- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/examples/cpp-dummy/micro_cpp_dummy.cpp b/examples/cpp-dummy/micro_cpp_dummy.cpp index 17f782c6..78b473ba 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.cpp +++ b/examples/cpp-dummy/micro_cpp_dummy.cpp @@ -13,7 +13,7 @@ #include "micro_cpp_dummy.hpp" // Constructor -MicroSimulation::MicroSimulation() : _micro_scalar_data(0), _checkpoint(0) {} +MicroSimulation::MicroSimulation() : _micro_scalar_data(0), _state(0) {} // Initialize void MicroSimulation::initialize() @@ -21,7 +21,7 @@ void MicroSimulation::initialize() std::cout << "Initialize micro problem\n"; _micro_scalar_data = 0; _micro_vector_data.clear(); - _checkpoint = 0; + _state = 0; } // Solve @@ -57,13 +57,13 @@ py::dict MicroSimulation::solve(py::dict macro_data, double dt) void MicroSimulation::set_state(py::list state) { _micro_scalar_data = state[0].cast(); - _checkpoint = state[1].cast(); + _state = state[1].cast(); } // This function needs to return variables which can fully define the state of a micro simulation py::list MicroSimulation::get_state() const { - std::vector state{_micro_scalar_data, _checkpoint}; + std::vector state{_micro_scalar_data, _state}; py::list state_python = py::cast(state); return state_python; } diff --git a/examples/cpp-dummy/micro_cpp_dummy.hpp b/examples/cpp-dummy/micro_cpp_dummy.hpp index 9c0acf69..4e9828d7 100644 --- a/examples/cpp-dummy/micro_cpp_dummy.hpp +++ b/examples/cpp-dummy/micro_cpp_dummy.hpp @@ -26,5 +26,5 @@ class MicroSimulation private: double _micro_scalar_data; std::vector _micro_vector_data; - double _checkpoint; + double _state; }; diff --git a/examples/python-dummy/micro_dummy.py b/examples/python-dummy/micro_dummy.py index a961717e..783d36d8 100644 --- a/examples/python-dummy/micro_dummy.py +++ b/examples/python-dummy/micro_dummy.py @@ -13,12 +13,12 @@ def __init__(self): self._dims = 3 self._micro_scalar_data = None self._micro_vector_data = None - self._checkpoint = None + self._state = None def initialize(self): self._micro_scalar_data = 0 self._micro_vector_data = [] - self._checkpoint = 0 + self._state = 0 def solve(self, macro_data, dt): assert dt != 0 @@ -31,7 +31,7 @@ def solve(self, macro_data, dt): "micro-vector-data": self._micro_vector_data.copy()} def set_state(self, state): - self._checkpoint = state + self._state = state def get_state(self): - return self._checkpoint + return 
self._state From f7a275368fdcfd13933082f61d418fc5cedb9029 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 18 Jul 2023 07:59:59 +0200 Subject: [PATCH 69/87] Update CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8461eb15..cec4438d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## latest +- Add global variant to adaptivity (still experimental) https://github.com/precice/micro-manager/pull/42 - Add norm-based (L1 and L2) support for functions in similarity distance calculation with absolute and relative variants https://github.com/precice/micro-manager/pull/40 - New domain decomposition strategy based on user input of number of processors along each axis https://github.com/precice/micro-manager/pull/41 - Add pickling support for C++ solver dummy https://github.com/precice/micro-manager/pull/30 From daf49618dff4b0ea52c7ec69386674de02ab9b54 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 18 Jul 2023 13:05:07 +0200 Subject: [PATCH 70/87] Add release guide --- docs/ReleaseGuide.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 docs/ReleaseGuide.md diff --git a/docs/ReleaseGuide.md b/docs/ReleaseGuide.md new file mode 100644 index 00000000..a1898033 --- /dev/null +++ b/docs/ReleaseGuide.md @@ -0,0 +1,26 @@ +# Guide to release new version of the Micro Manager + +The developer who is releasing a new version of the the Micro Manager is expected to follow this work flow: + +The release of the `micro-manager` repository is made directly from a release branch called `micro-manager-v1.2.3`. This branch is mainly needed to help other developers with testing. + +1. Create a branch called `micro-manager-v1.2.3` from the latest commit of the `develop` branch. + +2. [Open a Pull Request `main` <-- `micro-manager-v1.2.3`](https://github.com/precice/micro-manager/compare/main...main) named after the version (i.e. `Release v1.2.3`) and briefly describe the new features of the release in the PR description. + +3. Bump the version in the `CHANGELOG.md` on `micro-manager-v1.2.3`. + +4. [Draft a New Release](https://github.com/precice/micro-manager/releases/new) in the `Releases` section of the repository page in a web browser. The release tag needs to be the exact version number (i.e.`v1.2.3` or `v1.2.3rc1`, compare to [existing tags](https://github.com/precice/micro-manager/tags)). Use `@target:main`. Release title is also the version number (i.e. `v1.2.3` or `v1.2.3rc1`, compare to [existing releases](https://github.com/precice/micro-manager/tags)). + + * *Note:* If it is a pre-release then the option *This is a pre-release* needs to be selected at the bottom of the page. Use `@target:micro-manager-v1.2.3` for a pre-release, since we will never merge a pre-release into `main`. + * Use the `Auto-generate release notes` feature. + + a) If a pre-release is made: Directly hit the "Publish release" button in your Release Draft. Now you can check the artifacts (e.g. release on [PyPI](https://pypi.org/project/micro-manager-precice/#history)) of the release. *Note:* As soon as a new tag is created github actions will take care of deploying the new version on PyPI using [this workflow](https://github.com/precice/micro-manager/actions?query=workflow%3A%22Upload+Python+Package%22). + + b) If this is a "real" release: As soon as one approving review is made, merge the release PR (from `micro-manager-v1.2.3`) into `main`. + +5. Merge `main` into `develop` for synchronization of `develop`. + +6. 
If everything is in order up to this point then the new version can be released by hitting the "Publish release" button in your Release Draft. This will create the corresponding tag and trigger [publishing the release to PyPI](https://github.com/precice/micro-manager/actions?query=workflow%3A%22Upload+Python+Package%22). + +7. Add an empty commit on main via `git checkout main`, then `git commit --allow-empty -m "post-tag bump"`. Check that everything is in order via `git log`. Important: The `tag` and `origin/main` should not point to the same commit. From a8b827c850a9e867785b1a802896cef02df78fd3 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 18 Jul 2023 13:36:14 +0200 Subject: [PATCH 71/87] Add content to tests/README --- tests/README.md | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tests/README.md b/tests/README.md index 8c6fd945..8e98ac76 100644 --- a/tests/README.md +++ b/tests/README.md @@ -2,9 +2,15 @@ This folder contains everything needed for testing. The tests are split into two categories: -* `unit` contains unit tests that only check independent functions and modules. Interaction with preCICE is not required. Therefore no mocking should be performed. -* `integration` contains integration tests that interact with preCICE. +* `unit` contains unit tests that only check independent functions and modules. +* `integration` contains an integration test which uses preCICE and a Micro Manager. -## Programming Guidelines +## Unit tests -Make sure to **only** use `tests/MockedPrecice.py` and `@patch.dict('sys.modules', **{'precice': MockedPrecice})` in `integration` and not in `unit`. If during the development of a test mocking becomes necessary or the mocked up version of preCICE is not used you might have to reconsider the design or where the test is located. +The unit tests can be run with [unittest](https://docs.python.org/3/library/unittest.html). For example, the tests for domain decomposition can be run with `python -m unittest test_domain_decomposition.py`. + +The tests in `test_adaptivity_parallel.py` are designed to be run with 2 MPI processes. This can be run in the following way: `mpiexec -n 2 python -m unittest test_adaptivity_parallel.py`. + +## Integration test + +The integration test is a macro-micro case where the macro simulation is a unit cube. The micro simulation is a dummy which increments the data received from preCICE. From caccb84f50d2169969509bc5211d71cbefa8c8cf Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Tue, 1 Aug 2023 16:02:34 -0400 Subject: [PATCH 72/87] Update README.md --- README.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/README.md b/README.md index 56d5ad73..2ddc2e71 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,4 @@ The main documentation is rendered on the [preCICE website](https://precice.org/ Please report any [issues](https://github.com/precice/micro-manager/issues) and give us feedback through [one of our community channels](https://precice.org/community-channels.html). -The concept and initial design of the Micro Manager has been discussed in - -Desai, Ishaan, & Bringedal, Carina & Uekermann, Benjamin. A flexible software approach to simulate two-scale coupled problems. ECCOMAS Congress 2022. [10.23967/eccomas.2022.037](https://doi.org/10.23967/eccomas.2022.037). +The concept and initial design of the Micro Manager has been discussed in *Desai, Ishaan, & Bringedal, Carina & Uekermann, Benjamin. A flexible software approach to simulate two-scale coupled problems. 
ECCOMAS Congress 2022. [10.23967/eccomas.2022.037](https://doi.org/10.23967/eccomas.2022.037)*. From 2eb2d7c4f055b573b723a18fbef128aa9269281c Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 4 Aug 2023 14:45:29 -0400 Subject: [PATCH 73/87] Add logger to config and add documentation for tests --- micro_manager/config.py | 22 ++++++++++++---------- micro_manager/micro_manager.py | 2 +- tests/unit/test_adaptivity_serial.py | 2 -- tests/unit/test_domain_decomposition.py | 2 +- tests/unit/test_micro_manager.py | 18 +++++++++++++++++- 5 files changed, 31 insertions(+), 15 deletions(-) diff --git a/micro_manager/config.py b/micro_manager/config.py index 6dcf9a1f..07b73f71 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -12,7 +12,7 @@ class Config: the config class in https://github.com/precice/fenics-adapter/tree/develop/fenicsadapter """ - def __init__(self, config_filename): + def __init__(self, logger, config_filename): """ Constructor of the Config class. @@ -21,6 +21,8 @@ def __init__(self, config_filename): config_filename : string Name of the JSON configuration file """ + self._logger = logger + self._micro_file_name = None self._config_file_name = None @@ -77,7 +79,7 @@ def read_json(self, config_filename): else: raise Exception("Write data dictionary as a value other than 'scalar' or 'vector'") except BaseException: - print("No write data names provided. Micro manager will only read data from preCICE.") + self._logger.info("No write data names provided. Micro manager will only read data from preCICE.") try: self._read_data_names = data["coupling_params"]["read_data_names"] @@ -90,14 +92,14 @@ def read_json(self, config_filename): else: raise Exception("Read data dictionary as a value other than 'scalar' or 'vector'") except BaseException: - print("No read data names provided. Micro manager will only write data to preCICE.") + self._logger.info("No read data names provided. 
Micro manager will only write data to preCICE.") self._macro_domain_bounds = data["simulation_params"]["macro_domain_bounds"] try: self._ranks_per_axis = data["simulation_params"]["decomposition"] except BaseException: - print("Domain decomposition is not specified, so the Micro Manager will expect to be run in serial.") + self._logger.info("Domain decomposition is not specified, so the Micro Manager will expect to be run in serial.") try: if data["simulation_params"]["adaptivity"]: @@ -105,7 +107,7 @@ def read_json(self, config_filename): else: self._adaptivity = False except BaseException: - print("Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations in all time steps.") + self._logger.info("Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations in all time steps.") if self._adaptivity: if data["simulation_params"]["adaptivity"]["type"] == "local": @@ -126,7 +128,7 @@ def read_json(self, config_filename): if "similarity_measure" in data["simulation_params"]["adaptivity"]: self._adaptivity_similarity_measure = data["simulation_params"]["adaptivity"]["similarity_measure"] else: - print("No similarity measure provided, using L1 norm as default") + self._logger.info("No similarity measure provided, using L1 norm as default") self._adaptivity_similarity_measure = "L1" adaptivity_every_implicit_iteration = data["simulation_params"]["adaptivity"]["every_implicit_iteration"] @@ -137,7 +139,7 @@ def read_json(self, config_filename): self._adaptivity_every_implicit_iteration = False if not self._adaptivity_every_implicit_iteration: - print("Micro Manager will compute adaptivity once at the start of every time window") + self._logger.info("Micro Manager will compute adaptivity once at the start of every time window") self._write_data_names["active_state"] = False self._write_data_names["active_steps"] = False @@ -153,12 +155,12 @@ def read_json(self, config_filename): else: raise Exception("Diagnostics data dictionary as a value other than 'scalar' or 'vector'") except BaseException: - print("No diagnostics data is defined. Micro Manager will not output any diagnostics data.") + self._logger.info("No diagnostics data is defined. 
Micro Manager will not output any diagnostics data.") try: self._micro_output_n = data["diagnostics"]["micro_output_n"] except BaseException: - print("Output interval of micro simulations not specified, if output is available then it will be called " + self._logger.info("Output interval of micro simulations not specified, if output is available then it will be called " "in every time window.") try: @@ -166,7 +168,7 @@ def read_json(self, config_filename): self._output_micro_sim_time = True self._write_data_names["micro_sim_time"] = False except BaseException: - print("Micro manager will not output time required to solve each micro simulation in each time step.") + self._logger.info("Micro manager will not output time required to solve each micro simulation in each time step.") def get_config_file_name(self): """ diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index e4eb9cd9..1858ff48 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -62,7 +62,7 @@ def __init__(self, config_file: str) -> None: self._micro_sims_have_output = False self._logger.info("Provided configuration file: {}".format(config_file)) - self._config = Config(config_file) + self._config = Config(self._logger, config_file) # Define the preCICE interface self._interface = precice.Interface( diff --git a/tests/unit/test_adaptivity_serial.py b/tests/unit/test_adaptivity_serial.py index 7334b203..c9dcc411 100644 --- a/tests/unit/test_adaptivity_serial.py +++ b/tests/unit/test_adaptivity_serial.py @@ -2,11 +2,9 @@ from unittest.mock import MagicMock from micro_manager.adaptivity.adaptivity import AdaptivityCalculator from micro_manager.adaptivity.local_adaptivity import LocalAdaptivityCalculator -from micro_manager.adaptivity.global_adaptivity import GlobalAdaptivityCalculator from micro_manager.config import Config import numpy as np from math import exp -from mpi4py import MPI class TestLocalAdaptivity(TestCase): diff --git a/tests/unit/test_domain_decomposition.py b/tests/unit/test_domain_decomposition.py index 9a813e5c..a07d996d 100644 --- a/tests/unit/test_domain_decomposition.py +++ b/tests/unit/test_domain_decomposition.py @@ -7,7 +7,7 @@ class TestDomainDecomposition(TestCase): def setUp(self) -> None: self._logger = MagicMock() - self._macro_bounds_3d = [-1, 1, -2, 2, -2, 8] + self._macro_bounds_3d = [-1, 1, -2, 2, -2, 8] # Cuboid which is not symmetric around origin def test_rank5_outof_10_3d(self): """ diff --git a/tests/unit/test_micro_manager.py b/tests/unit/test_micro_manager.py index 0199b92b..385119f4 100644 --- a/tests/unit/test_micro_manager.py +++ b/tests/unit/test_micro_manager.py @@ -1,5 +1,6 @@ import numpy as np from unittest import TestCase +from unittest.mock import MagicMock import micro_manager @@ -35,6 +36,9 @@ def setUp(self): self.macro_bounds = [0.0, 25.0, 0.0, 25.0, 0.0, 25.0] def test_micromanager_constructor(self): + """ + Test if the constructor of the MicroManager class passes correct values to member variables. 
+ """ manager = micro_manager.MicroManager('micro-manager-config.json') self.assertListEqual(manager._macro_bounds, self.macro_bounds) @@ -43,6 +47,9 @@ def test_micromanager_constructor(self): self.assertEqual(manager._micro_n_out, 10) def test_initialize(self): + """ + Test if the initialize function of the MicroManager class initializes member variables to correct values + """ manager = micro_manager.MicroManager('micro-manager-config.json') manager.initialize() @@ -57,6 +64,9 @@ def test_initialize(self): self.assertDictEqual(self.fake_write_data_names, manager._write_data_names) def test_read_write_data_from_precice(self): + """ + Test if the internal functions _read_data_from_precice and _write_data_to_precice work as expected. + """ manager = micro_manager.MicroManager('micro-manager-config.json') manager._write_data_to_precice(self.fake_write_data) @@ -68,6 +78,9 @@ def test_read_write_data_from_precice(self): fake_data["macro-vector-data"].tolist()) def test_solve_mico_sims(self): + """ + Test if the internal function _solve_micro_simulations works as expected. + """ manager = micro_manager.MicroManager('micro-manager-config.json') manager._local_number_of_sims = 4 manager._micro_sims = [MicroSimulation() for _ in range(4)] @@ -81,7 +94,10 @@ def test_solve_mico_sims(self): (fake_data["micro-vector-data"] + 1).tolist()) def test_config(self): - config = micro_manager.Config('micro-manager-config.json') + """ + Test if the functions in the Config class work. + """ + config = micro_manager.Config(MagicMock(), 'micro-manager-config.json') self.assertEqual(config._config_file_name.split("/")[-1], "dummy-config.xml") self.assertEqual(config._micro_file_name, "test_micro_manager") From 8d55232dc2ee7d56a45d7c188dfd87dcc6caf1c5 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 4 Aug 2023 16:33:46 -0400 Subject: [PATCH 74/87] Formatting --- micro_manager/config.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/micro_manager/config.py b/micro_manager/config.py index 07b73f71..d881c09e 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -99,7 +99,8 @@ def read_json(self, config_filename): try: self._ranks_per_axis = data["simulation_params"]["decomposition"] except BaseException: - self._logger.info("Domain decomposition is not specified, so the Micro Manager will expect to be run in serial.") + self._logger.info( + "Domain decomposition is not specified, so the Micro Manager will expect to be run in serial.") try: if data["simulation_params"]["adaptivity"]: @@ -107,7 +108,8 @@ def read_json(self, config_filename): else: self._adaptivity = False except BaseException: - self._logger.info("Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations in all time steps.") + self._logger.info( + "Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations in all time steps.") if self._adaptivity: if data["simulation_params"]["adaptivity"]["type"] == "local": @@ -160,15 +162,17 @@ def read_json(self, config_filename): try: self._micro_output_n = data["diagnostics"]["micro_output_n"] except BaseException: - self._logger.info("Output interval of micro simulations not specified, if output is available then it will be called " - "in every time window.") + self._logger.info( + "Output interval of micro simulations not specified, if output is available then it will be called " + "in every time window.") try: if data["diagnostics"]["output_micro_sim_solve_time"]: 
self._output_micro_sim_time = True self._write_data_names["micro_sim_time"] = False except BaseException: - self._logger.info("Micro manager will not output time required to solve each micro simulation in each time step.") + self._logger.info( + "Micro manager will not output time required to solve each micro simulation in each time step.") def get_config_file_name(self): """ From 23ab50be1a33db889851182c49f956b121a092ca Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Mon, 7 Aug 2023 15:04:20 -0400 Subject: [PATCH 75/87] Add badges in README --- README.md | 12 ++++++++++++ micro_manager/config.py | 1 + setup.py | 3 +-- tests/README.md | 4 ++-- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 2ddc2e71..0d82970d 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,17 @@ # Micro Manager + + GNU LGPL license + + + + Test Adaptivity + + + + Upload Python Package + + A tool to facilitate solving two-scale (macro-micro) coupled problems using the coupling library [preCICE](https://www.precice.org/). The main documentation is rendered on the [preCICE website](https://precice.org/tooling-micro-manager-overview.html). diff --git a/micro_manager/config.py b/micro_manager/config.py index d881c09e..5877f6d9 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -116,6 +116,7 @@ def read_json(self, config_filename): self._adaptivity_type = "local" elif data["simulation_params"]["adaptivity"]["type"] == "global": self._adaptivity_type = "global" + self._logger.warning("Global adaptivity is still experimental. We recommend using it for small (<50 macro vertices) cases only.") else: raise Exception("Adaptivity type can be either local or global.") diff --git a/setup.py b/setup.py index 8a13a625..6083111c 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,5 @@ import os from setuptools import setup, find_packages -import warnings # from https://stackoverflow.com/a/9079062 import sys @@ -18,7 +17,7 @@ description='micro-manager-precice is a package which facilitates two-scale macro-micro coupled simulations using preCICE', long_description=long_description, long_description_content_type='text/markdown', - url='https://github.com/precice/micro-manager', + url='https://precice.org/tooling-micro-manager-overview.html', entry_points={ 'console_scripts': ['micro_manager=micro_manager.micro_manager:main']}, author='Ishaan Desai', diff --git a/tests/README.md b/tests/README.md index 8e98ac76..8e56cf89 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,8 +1,8 @@ # Tests -This folder contains everything needed for testing. The tests are split into two categories: +This folder contains everything related to testing. The tests are split into two categories: -* `unit` contains unit tests that only check independent functions and modules. +* `unit` contains unit tests that only check independent functions and modules. These tests do not use preCICE. Instead a mocked `precice.py` is used. * `integration` contains an integration test which uses preCICE and a Micro Manager. 
## Unit tests From c465ba5dd06d0bd988f67ce41a3adea50f1731db Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Mon, 7 Aug 2023 15:06:11 -0400 Subject: [PATCH 76/87] Formatting --- micro_manager/config.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/micro_manager/config.py b/micro_manager/config.py index 5877f6d9..79164181 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -116,7 +116,8 @@ def read_json(self, config_filename): self._adaptivity_type = "local" elif data["simulation_params"]["adaptivity"]["type"] == "global": self._adaptivity_type = "global" - self._logger.warning("Global adaptivity is still experimental. We recommend using it for small (<50 macro vertices) cases only.") + self._logger.warning( + "Global adaptivity is still experimental. We recommend using it for small (<50 macro vertices) cases only.") else: raise Exception("Adaptivity type can be either local or global.") From f33f3e90dd9aade606c78c3f9ff99870088c5b59 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 9 Aug 2023 08:41:20 -0400 Subject: [PATCH 77/87] Trigger workflows From 191dc878bffc54933761250c34ae954ba0cc231b Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 9 Aug 2023 10:32:00 -0400 Subject: [PATCH 78/87] Fix adaptivity test --- tests/unit/test_adaptivity_serial.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/unit/test_adaptivity_serial.py b/tests/unit/test_adaptivity_serial.py index c9dcc411..9aaa7227 100644 --- a/tests/unit/test_adaptivity_serial.py +++ b/tests/unit/test_adaptivity_serial.py @@ -118,7 +118,8 @@ def test_adaptivity_norms(self): """ Test functionality for calculating similarity criteria between pairs of simulations using different norms in class AdaptivityCalculator. """ - calc = AdaptivityCalculator(Config('micro-manager-config.json'), 0) + logger = MagicMock() + calc = AdaptivityCalculator(Config(logger, 'micro-manager-config.json'), logger) fake_data = np.array([[1], [2], [3]]) self.assertTrue(np.allclose(calc._l1(fake_data), np.array([[0, 1, 2], [1, 0, 1], [2, 1, 0]]))) From 41d1b7a67a0c2603cfae3b5a445165c8d805655d Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Wed, 9 Aug 2023 16:18:28 -0400 Subject: [PATCH 79/87] use number_of_sims instead of number_of_(local/global)_sims --- micro_manager/micro_manager.py | 24 ++++++++++++------------ tests/unit/test_micro_manager.py | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py index 1858ff48..ef747d7c 100644 --- a/micro_manager/micro_manager.py +++ b/micro_manager/micro_manager.py @@ -93,7 +93,7 @@ def __init__(self, config_file: str) -> None: self._is_micro_solve_time_required = self._config.write_micro_solve_time() self._local_number_of_sims = 0 - self._global_number_of_micro_sims = 0 + self._global_number_of_sims = 0 self._is_rank_empty = False self._dt = 0 self._mesh_vertex_ids = None # IDs of macro vertices as set by preCICE @@ -102,7 +102,7 @@ def __init__(self, config_file: str) -> None: self._is_adaptivity_on = self._config.turn_on_adaptivity() if self._is_adaptivity_on: - self._number_of_micro_sims_for_adaptivity = 0 + self._number_of_sims_for_adaptivity = 0 self._data_for_adaptivity: Dict[str, np.ndarray] = dict() self._adaptivity_type = self._config.get_adaptivity_type() @@ -169,7 +169,7 @@ def initialize(self) -> None: self._comm.Allgather(np.array(self._local_number_of_sims), nms_all_ranks) # Get global number of micro simulations - self._global_number_of_micro_sims 
= np.sum(nms_all_ranks) + self._global_number_of_sims = np.sum(nms_all_ranks) if self._is_adaptivity_on: for name, is_data_vector in self._adaptivity_data_names.items(): @@ -206,18 +206,18 @@ def initialize(self) -> None: for i in range(self._local_number_of_sims): micro_sims_on_this_rank[i] = self._rank - self._rank_of_sim = np.zeros(self._global_number_of_micro_sims, dtype=np.intc) # DECLARATION + self._rank_of_sim = np.zeros(self._global_number_of_sims, dtype=np.intc) # DECLARATION self._comm.Allgather(micro_sims_on_this_rank, self._rank_of_sim) - self._is_sim_on_this_rank = [False] * self._global_number_of_micro_sims # DECLARATION - for i in range(self._global_number_of_micro_sims): + self._is_sim_on_this_rank = [False] * self._global_number_of_sims # DECLARATION + for i in range(self._global_number_of_sims): if self._rank_of_sim[i] == self._rank: self._is_sim_on_this_rank[i] = True if self._adaptivity_type == "local": self._adaptivity_controller = LocalAdaptivityCalculator( self._config, self._logger) - self._number_of_micro_sims_for_adaptivity = self._local_number_of_sims + self._number_of_sims_for_adaptivity = self._local_number_of_sims elif self._adaptivity_type == "global": self._adaptivity_controller = GlobalAdaptivityCalculator( self._config, @@ -227,7 +227,7 @@ def initialize(self) -> None: self._global_ids_of_local_sims, self._rank, self._comm) - self._number_of_micro_sims_for_adaptivity = self._global_number_of_micro_sims + self._number_of_sims_for_adaptivity = self._global_number_of_sims self._micro_sims_active_steps = np.zeros(self._local_number_of_sims) else: @@ -279,17 +279,17 @@ def solve(self) -> None: if self._is_adaptivity_on: similarity_dists = np.zeros( - (self._number_of_micro_sims_for_adaptivity, - self._number_of_micro_sims_for_adaptivity)) + (self._number_of_sims_for_adaptivity, + self._number_of_sims_for_adaptivity)) # Start adaptivity calculation with all sims inactive - is_sim_active = np.array([False] * self._number_of_micro_sims_for_adaptivity) + is_sim_active = np.array([False] * self._number_of_sims_for_adaptivity) # Activate the first one (a random choice) is_sim_active[0] = True # Associate all sims to the one active sim - sim_is_associated_to = np.zeros((self._number_of_micro_sims_for_adaptivity), dtype=np.intc) + sim_is_associated_to = np.zeros((self._number_of_sims_for_adaptivity), dtype=np.intc) sim_is_associated_to[0] = -2 # An active sim does not have an associated sim similarity_dists_cp = None diff --git a/tests/unit/test_micro_manager.py b/tests/unit/test_micro_manager.py index 385119f4..af5d5fc8 100644 --- a/tests/unit/test_micro_manager.py +++ b/tests/unit/test_micro_manager.py @@ -55,7 +55,7 @@ def test_initialize(self): manager.initialize() self.assertEqual(manager._dt, 0.1) # from Interface.initialize - self.assertEqual(manager._global_number_of_micro_sims, 4) + self.assertEqual(manager._global_number_of_sims, 4) self.assertListEqual(manager._macro_bounds, self.macro_bounds) self.assertListEqual(manager._mesh_vertex_ids.tolist(), [0, 1, 2, 3]) self.assertEqual(len(manager._micro_sims), 4) From 4dd37aa4f7b07152490ff1cb9ea5c577af1a6fb1 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Thu, 24 Aug 2023 15:05:27 -0400 Subject: [PATCH 80/87] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0d82970d..af8f6b4f 100644 --- a/README.md +++ b/README.md @@ -18,4 +18,4 @@ The main documentation is rendered on the [preCICE website](https://precice.org/ Please report any 
[issues](https://github.com/precice/micro-manager/issues) and give us feedback through [one of our community channels](https://precice.org/community-channels.html). -The concept and initial design of the Micro Manager has been discussed in *Desai, Ishaan, & Bringedal, Carina & Uekermann, Benjamin. A flexible software approach to simulate two-scale coupled problems. ECCOMAS Congress 2022. [10.23967/eccomas.2022.037](https://doi.org/10.23967/eccomas.2022.037)*. +The concept and initial design of the Micro Manager is described in *Desai, Ishaan, & Bringedal, Carina & Uekermann, Benjamin. A flexible software approach to simulate two-scale coupled problems. ECCOMAS Congress 2022. [10.23967/eccomas.2022.037](https://doi.org/10.23967/eccomas.2022.037)*. From 2020140321c8ec5a978ef8b4cbc54b68b430f89a Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Fri, 25 Aug 2023 15:48:47 -0400 Subject: [PATCH 81/87] Minor updates to the release guide --- docs/ReleaseGuide.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ReleaseGuide.md b/docs/ReleaseGuide.md index a1898033..e06ada24 100644 --- a/docs/ReleaseGuide.md +++ b/docs/ReleaseGuide.md @@ -6,11 +6,11 @@ The release of the `micro-manager` repository is made directly from a release br 1. Create a branch called `micro-manager-v1.2.3` from the latest commit of the `develop` branch. -2. [Open a Pull Request `main` <-- `micro-manager-v1.2.3`](https://github.com/precice/micro-manager/compare/main...main) named after the version (i.e. `Release v1.2.3`) and briefly describe the new features of the release in the PR description. +2. If it is a real release, [open a Pull Request `main` <-- `micro-manager-v1.2.3`](https://github.com/precice/micro-manager/compare/main...main) named after the version (i.e. `Release v1.2.3`) and briefly describe the new features of the release in the PR description. 3. Bump the version in the `CHANGELOG.md` on `micro-manager-v1.2.3`. -4. [Draft a New Release](https://github.com/precice/micro-manager/releases/new) in the `Releases` section of the repository page in a web browser. The release tag needs to be the exact version number (i.e.`v1.2.3` or `v1.2.3rc1`, compare to [existing tags](https://github.com/precice/micro-manager/tags)). Use `@target:main`. Release title is also the version number (i.e. `v1.2.3` or `v1.2.3rc1`, compare to [existing releases](https://github.com/precice/micro-manager/tags)). +4. [Draft a new release](https://github.com/precice/micro-manager/releases/new) in the `Releases` section of the repository page in a web browser. The release tag needs to be the exact version number (i.e.`v1.2.3` or `v1.2.3rc1`, compare to [existing tags](https://github.com/precice/micro-manager/tags)). Use `@target:main`. Release title is also the version number (i.e. `v1.2.3` or `v1.2.3rc1`, compare to [existing releases](https://github.com/precice/micro-manager/tags)). * *Note:* If it is a pre-release then the option *This is a pre-release* needs to be selected at the bottom of the page. Use `@target:micro-manager-v1.2.3` for a pre-release, since we will never merge a pre-release into `main`. * Use the `Auto-generate release notes` feature. 
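For readers following the adaptivity bookkeeping renamed in [PATCH 79/87], the arrays set up in `solve()` reduce to the following initial state. This is a standalone sketch mirroring the hunks above (not part of any patch); `n_sims` stands in for `self._number_of_sims_for_adaptivity`.

```python
import numpy as np

n_sims = 4  # placeholder for self._number_of_sims_for_adaptivity

# Pairwise similarity distances between micro simulations start at zero.
similarity_dists = np.zeros((n_sims, n_sims))

# All micro simulations start inactive; one of them (index 0, an arbitrary
# choice) is then activated.
is_sim_active = np.array([False] * n_sims)
is_sim_active[0] = True

# Every simulation is associated to the single active simulation; the value -2
# marks an active simulation, which has no associated simulation.
sim_is_associated_to = np.zeros(n_sims, dtype=np.intc)
sim_is_associated_to[0] = -2
```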
From 7cab4ae436596c7a51226d9057c5a3caee7bbc60 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Sat, 26 Aug 2023 10:56:39 -0400 Subject: [PATCH 82/87] Set pyprecice dependency version to <=2.5.0 to ensure compatibility --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 6083111c..4335b4d7 100644 --- a/setup.py +++ b/setup.py @@ -26,7 +26,7 @@ packages=find_packages( exclude=['examples']), install_requires=[ - 'pyprecice>=2.3.0', + 'pyprecice>=2.3.0, <=2.5.0', 'numpy>=1.13.3', 'mpi4py'], test_suite='tests', From 2c93cfbcb3bfa0a835302cfe955003b25c37709b Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Sat, 26 Aug 2023 11:41:18 -0400 Subject: [PATCH 83/87] Restrict pyprecice to v2.5.0.4 to ensure working state --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 4335b4d7..6cd592dc 100644 --- a/setup.py +++ b/setup.py @@ -26,7 +26,7 @@ packages=find_packages( exclude=['examples']), install_requires=[ - 'pyprecice>=2.3.0, <=2.5.0', + 'pyprecice==2.5.0.4', 'numpy>=1.13.3', 'mpi4py'], test_suite='tests', From ead937e8d67e3899256efce6e3144212165156a5 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Sat, 26 Aug 2023 12:18:26 -0400 Subject: [PATCH 84/87] Update release guide --- docs/ReleaseGuide.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ReleaseGuide.md b/docs/ReleaseGuide.md index e06ada24..e4333d7e 100644 --- a/docs/ReleaseGuide.md +++ b/docs/ReleaseGuide.md @@ -8,7 +8,7 @@ The release of the `micro-manager` repository is made directly from a release br 2. If it is a real release, [open a Pull Request `main` <-- `micro-manager-v1.2.3`](https://github.com/precice/micro-manager/compare/main...main) named after the version (i.e. `Release v1.2.3`) and briefly describe the new features of the release in the PR description. -3. Bump the version in the `CHANGELOG.md` on `micro-manager-v1.2.3`. +3. Bump the version in the `CHANGELOG.md` and in `setup.py` on `micro-manager-v1.2.3`. 4. [Draft a new release](https://github.com/precice/micro-manager/releases/new) in the `Releases` section of the repository page in a web browser. The release tag needs to be the exact version number (i.e.`v1.2.3` or `v1.2.3rc1`, compare to [existing tags](https://github.com/precice/micro-manager/tags)). Use `@target:main`. Release title is also the version number (i.e. `v1.2.3` or `v1.2.3rc1`, compare to [existing releases](https://github.com/precice/micro-manager/tags)). 
From a88d08b6969f334abed66c433c97640ea9375bc8 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Sun, 27 Aug 2023 20:16:53 -0400 Subject: [PATCH 85/87] Manually set buffer for pickled message in mpi4py, and then remove warning for global adaptivity --- micro_manager/adaptivity/global_adaptivity.py | 3 ++- micro_manager/config.py | 2 -- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/micro_manager/adaptivity/global_adaptivity.py b/micro_manager/adaptivity/global_adaptivity.py index 35be5995..88a48641 100644 --- a/micro_manager/adaptivity/global_adaptivity.py +++ b/micro_manager/adaptivity/global_adaptivity.py @@ -312,7 +312,8 @@ def _p2p_comm(self, assoc_active_ids: list, data: list) -> list: recv_reqs = [] for global_id, recv_rank in recv_map.items(): tag = self._create_tag(global_id, recv_rank, self._rank) - req = self._comm.irecv(source=recv_rank, tag=tag) + bufsize = 1 << 30 # allocate and use a temporary 1 MiB buffer size https://github.com/mpi4py/mpi4py/issues/389 + req = self._comm.irecv(bufsize, source=recv_rank, tag=tag) recv_reqs.append(req) # Wait for all non-blocking communication to complete diff --git a/micro_manager/config.py b/micro_manager/config.py index 79164181..d881c09e 100644 --- a/micro_manager/config.py +++ b/micro_manager/config.py @@ -116,8 +116,6 @@ def read_json(self, config_filename): self._adaptivity_type = "local" elif data["simulation_params"]["adaptivity"]["type"] == "global": self._adaptivity_type = "global" - self._logger.warning( - "Global adaptivity is still experimental. We recommend using it for small (<50 macro vertices) cases only.") else: raise Exception("Adaptivity type can be either local or global.") From 319a80ec7e30cdd504dee7ed2e1154bfbad1bf4f Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Sun, 27 Aug 2023 20:25:25 -0400 Subject: [PATCH 86/87] Formatting --- micro_manager/adaptivity/global_adaptivity.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/micro_manager/adaptivity/global_adaptivity.py b/micro_manager/adaptivity/global_adaptivity.py index 88a48641..6164efdf 100644 --- a/micro_manager/adaptivity/global_adaptivity.py +++ b/micro_manager/adaptivity/global_adaptivity.py @@ -312,7 +312,7 @@ def _p2p_comm(self, assoc_active_ids: list, data: list) -> list: recv_reqs = [] for global_id, recv_rank in recv_map.items(): tag = self._create_tag(global_id, recv_rank, self._rank) - bufsize = 1 << 30 # allocate and use a temporary 1 MiB buffer size https://github.com/mpi4py/mpi4py/issues/389 + bufsize = 1 << 30 # allocate and use a temporary 1 MiB buffer size https://github.com/mpi4py/mpi4py/issues/389 req = self._comm.irecv(bufsize, source=recv_rank, tag=tag) recv_reqs.append(req) From fe552712b68e05837cd994c6746eaf704c7935c0 Mon Sep 17 00:00:00 2001 From: Ishaan Desai Date: Sat, 26 Aug 2023 14:46:23 -0400 Subject: [PATCH 87/87] Bump version --- CHANGELOG.md | 2 ++ setup.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cec4438d..c784ef1d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## latest +## v0.3.0 + - Add global variant to adaptivity (still experimental) https://github.com/precice/micro-manager/pull/42 - Add norm-based (L1 and L2) support for functions in similarity distance calculation with absolute and relative variants https://github.com/precice/micro-manager/pull/40 - New domain decomposition strategy based on user input of number of processors along each axis https://github.com/precice/micro-manager/pull/41 diff --git a/setup.py 
b/setup.py index 6cd592dc..38030dcb 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ setup( name='micro-manager-precice', - version='v0.2.1', + version='v0.3.0', description='micro-manager-precice is a package which facilitates two-scale macro-micro coupled simulations using preCICE', long_description=long_description, long_description_content_type='text/markdown',
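[PATCH 85/87] above works around mpi4py's small default receive buffer for pickled messages by passing an explicit buffer size to `irecv` (see https://github.com/mpi4py/mpi4py/issues/389). A minimal standalone sketch of that pattern follows; it assumes exactly two MPI ranks, and the payload and the 1 MiB buffer size are illustrative, not taken from the Micro Manager.

```python
from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

bufsize = 1 << 20  # 1 MiB temporary receive buffer, sized for the expected pickled message

if rank == 0:
    payload = {"state": np.random.rand(10000)}  # any picklable object
    req = comm.isend(payload, dest=1, tag=0)
    req.wait()
elif rank == 1:
    # Passing a buffer size avoids overflowing mpi4py's default irecv buffer
    # when the pickled object is large.
    req = comm.irecv(bufsize, source=0, tag=0)
    payload = req.wait()  # wait() returns the unpickled object
    print(payload["state"].shape)
```

Run the sketch with two ranks, for example `mpiexec -n 2 python sketch.py`.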