From a942a1679e51929db66d3c3e54c52b47038fee2a Mon Sep 17 00:00:00 2001
From: Kallinteris Andreas <30759571+Kallinteris-Andreas@users.noreply.github.com>
Date: Wed, 6 Dec 2023 11:45:59 +0200
Subject: [PATCH 1/4] add `MuJoCo.tests.test_model_sensors()` (#816)

---
 tests/envs/mujoco/test_mujoco_v5.py | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/tests/envs/mujoco/test_mujoco_v5.py b/tests/envs/mujoco/test_mujoco_v5.py
index b68d8b5ff..2a8df3c2b 100644
--- a/tests/envs/mujoco/test_mujoco_v5.py
+++ b/tests/envs/mujoco/test_mujoco_v5.py
@@ -627,6 +627,27 @@ def test_model_object_count(version: str):
     assert env.model.ntendon == 0
 
 
+# note: fails with `mujoco-mjx==3.0.1`
+@pytest.mark.parametrize("version", ["v5", "v4", "v3", "v2"])
+def test_model_sensors(version: str):
+    """Verify that all the sensors of the model are loaded."""
+    env = gym.make(f"Ant-{version}").unwrapped
+    assert env.data.cfrc_ext.shape == (14, 6)
+
+    env = gym.make(f"Humanoid-{version}").unwrapped
+    assert env.data.cinert.shape == (14, 10)
+    assert env.data.cvel.shape == (14, 6)
+    assert env.data.qfrc_actuator.shape == (23,)
+    assert env.data.cfrc_ext.shape == (14, 6)
+
+    if version != "v3":  # HumanoidStandup v3 does not exist
+        env = gym.make(f"HumanoidStandup-{version}").unwrapped
+        assert env.data.cinert.shape == (14, 10)
+        assert env.data.cvel.shape == (14, 6)
+        assert env.data.qfrc_actuator.shape == (23,)
+        assert env.data.cfrc_ext.shape == (14, 6)
+
+
 def test_dt():
     """Assert that env.dt gets assigned correctly."""
     env_a = gym.make("Ant-v5", include_cfrc_ext_in_observation=False).unwrapped
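The test above reads MuJoCo's state buffers straight off the unwrapped environment, so the asserted shapes follow from the model definitions. A minimal standalone version of the Ant check, assuming gymnasium with a MuJoCo backend installed (illustrative sketch, not part of the patch):

    import gymnasium as gym

    # cfrc_ext is MuJoCo's per-body external contact-force buffer: one 6-D
    # (torque, force) row per body, and the Ant model has 14 bodies,
    # hence the (14, 6) shape asserted by test_model_sensors.
    env = gym.make("Ant-v5").unwrapped
    env.reset(seed=0)
    assert env.data.cfrc_ext.shape == (14, 6)
    env.close()
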
From 14def0759fa094e9a4a6fa0d7b4d66719a11007b Mon Sep 17 00:00:00 2001
From: Kallinteris Andreas <30759571+Kallinteris-Andreas@users.noreply.github.com>
Date: Wed, 6 Dec 2023 23:52:07 +0200
Subject: [PATCH 2/4] [MuJoCo] factorize `_get_rew()` out of `step()` (#819)

---
 gymnasium/envs/mujoco/ant_v5.py             | 31 ++++++++++------
 gymnasium/envs/mujoco/half_cheetah_v5.py    | 21 ++++++-----
 gymnasium/envs/mujoco/hopper_v5.py          | 30 ++++++++++------
 gymnasium/envs/mujoco/humanoid_v5.py        | 36 +++++++++++--------
 gymnasium/envs/mujoco/humanoidstandup_v5.py | 27 +++++++++-----
 .../mujoco/inverted_double_pendulum_v5.py   | 19 ++++++----
 gymnasium/envs/mujoco/pusher_v5.py          | 21 +++++++----
 gymnasium/envs/mujoco/reacher_v5.py         | 21 +++++++----
 gymnasium/envs/mujoco/swimmer_v5.py         | 22 ++++++++----
 gymnasium/envs/mujoco/walker2d_v5.py        | 31 +++++++++-------
 10 files changed, 165 insertions(+), 94 deletions(-)

diff --git a/gymnasium/envs/mujoco/ant_v5.py b/gymnasium/envs/mujoco/ant_v5.py
index 5f15ad2cd..8b3ab177e 100644
--- a/gymnasium/envs/mujoco/ant_v5.py
+++ b/gymnasium/envs/mujoco/ant_v5.py
@@ -376,6 +376,23 @@ def step(self, action):
         xy_velocity = (xy_position_after - xy_position_before) / self.dt
         x_velocity, y_velocity = xy_velocity
 
+        observation = self._get_obs()
+        reward, reward_info = self._get_rew(x_velocity, action)
+        terminated = self.terminated
+        info = {
+            "x_position": self.data.qpos[0],
+            "y_position": self.data.qpos[1],
+            "distance_from_origin": np.linalg.norm(self.data.qpos[0:2], ord=2),
+            "x_velocity": x_velocity,
+            "y_velocity": y_velocity,
+            **reward_info,
+        }
+
+        if self.render_mode == "human":
+            self.render()
+        return observation, reward, terminated, False, info
+
+    def _get_rew(self, x_velocity: float, action):
         forward_reward = x_velocity * self._forward_reward_weight
         healthy_reward = self.healthy_reward
         rewards = forward_reward + healthy_reward
 
         ctrl_cost = self.control_cost(action)
         contact_cost = self.contact_cost
         costs = ctrl_cost + contact_cost
 
-        observation = self._get_obs()
         reward = rewards - costs
-        terminated = self.terminated
-        info = {
+
+        reward_info = {
             "reward_forward": forward_reward,
             "reward_ctrl": -ctrl_cost,
             "reward_contact": -contact_cost,
             "reward_survive": healthy_reward,
-            "x_position": self.data.qpos[0],
-            "y_position": self.data.qpos[1],
-            "distance_from_origin": np.linalg.norm(self.data.qpos[0:2], ord=2),
-            "x_velocity": x_velocity,
-            "y_velocity": y_velocity,
         }
 
-        if self.render_mode == "human":
-            self.render()
-        return observation, reward, terminated, False, info
+        return reward, reward_info
 
     def _get_obs(self):
         position = self.data.qpos.flatten()

diff --git a/gymnasium/envs/mujoco/half_cheetah_v5.py b/gymnasium/envs/mujoco/half_cheetah_v5.py
index 6499ca7fb..0649f4503 100644
--- a/gymnasium/envs/mujoco/half_cheetah_v5.py
+++ b/gymnasium/envs/mujoco/half_cheetah_v5.py
@@ -245,22 +245,25 @@ def step(self, action):
         x_position_after = self.data.qpos[0]
         x_velocity = (x_position_after - x_position_before) / self.dt
 
-        ctrl_cost = self.control_cost(action)
+        observation = self._get_obs()
+        reward, reward_info = self._get_rew(x_velocity, action)
+        info = {"x_position": x_position_after, "x_velocity": x_velocity, **reward_info}
 
+        if self.render_mode == "human":
+            self.render()
+        return observation, reward, False, False, info
+
+    def _get_rew(self, x_velocity: float, action):
         forward_reward = self._forward_reward_weight * x_velocity
+        ctrl_cost = self.control_cost(action)
 
-        observation = self._get_obs()
         reward = forward_reward - ctrl_cost
-        info = {
-            "x_position": x_position_after,
-            "x_velocity": x_velocity,
+
+        reward_info = {
             "reward_forward": forward_reward,
             "reward_ctrl": -ctrl_cost,
         }
-
-        if self.render_mode == "human":
-            self.render()
-        return observation, reward, False, False, info
+        return reward, reward_info
 
     def _get_obs(self):
         position = self.data.qpos.flatten()

diff --git a/gymnasium/envs/mujoco/hopper_v5.py b/gymnasium/envs/mujoco/hopper_v5.py
index a1f1086fc..68813d190 100644
--- a/gymnasium/envs/mujoco/hopper_v5.py
+++ b/gymnasium/envs/mujoco/hopper_v5.py
@@ -316,29 +316,37 @@ def step(self, action):
         x_position_after = self.data.qpos[0]
         x_velocity = (x_position_after - x_position_before) / self.dt
 
-        ctrl_cost = self.control_cost(action)
+        observation = self._get_obs()
+        reward, reward_info = self._get_rew(x_velocity, action)
+        terminated = self.terminated
+        info = {
+            "x_position": x_position_after,
+            "z_distance_from_origin": self.data.qpos[1] - self.init_qpos[1],
+            "x_velocity": x_velocity,
+            **reward_info,
+        }
+
+        if self.render_mode == "human":
+            self.render()
+        return observation, reward, terminated, False, info
 
+    def _get_rew(self, x_velocity: float, action):
         forward_reward = self._forward_reward_weight * x_velocity
         healthy_reward = self.healthy_reward
-
         rewards = forward_reward + healthy_reward
+
+        ctrl_cost = self.control_cost(action)
         costs = ctrl_cost
 
-        observation = self._get_obs()
         reward = rewards - costs
-        terminated = self.terminated
-        info = {
+
+        reward_info = {
             "reward_forward": forward_reward,
             "reward_ctrl": -ctrl_cost,
             "reward_survive": healthy_reward,
-            "x_position": x_position_after,
-            "z_distance_from_origin": self.data.qpos[1] - self.init_qpos[1],
-            "x_velocity": x_velocity,
         }
 
-        if self.render_mode == "human":
-            self.render()
-        return observation, reward, terminated, False, info
+        return reward, reward_info
 
     def reset_model(self):
         noise_low = -self._reset_noise_scale
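Every file in this patch follows the same split: step() keeps simulation, observation, info assembly, and rendering, while the new _get_rew() returns a (reward, reward_info) pair that step() unpacks and splices into info. One presumable payoff is that reward shaping now takes a single-method override instead of a re-implemented step(); a hypothetical sketch (HalfSpeedAnt is mine, not from the patch):

    from gymnasium.envs.mujoco.ant_v5 import AntEnv

    class HalfSpeedAnt(AntEnv):
        """Hypothetical Ant variant that halves the forward-velocity term."""

        def _get_rew(self, x_velocity: float, action):
            reward, reward_info = super()._get_rew(x_velocity, action)
            # Take half of the forward term back out of the scalar reward
            # and keep the logged component consistent with it.
            reward -= 0.5 * reward_info["reward_forward"]
            reward_info["reward_forward"] *= 0.5
            return reward, reward_info
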
diff --git a/gymnasium/envs/mujoco/humanoid_v5.py b/gymnasium/envs/mujoco/humanoid_v5.py
index b0f078ec0..1834d6d48 100644
--- a/gymnasium/envs/mujoco/humanoid_v5.py
+++ b/gymnasium/envs/mujoco/humanoid_v5.py
@@ -497,23 +497,10 @@ def step(self, action):
         xy_velocity = (xy_position_after - xy_position_before) / self.dt
         x_velocity, y_velocity = xy_velocity
 
-        ctrl_cost = self.control_cost(action)
-        contact_cost = self.contact_cost
-        costs = ctrl_cost + contact_cost
-
-        forward_reward = self._forward_reward_weight * x_velocity
-        healthy_reward = self.healthy_reward
-
-        rewards = forward_reward + healthy_reward
-
         observation = self._get_obs()
-        reward = rewards - costs
+        reward, reward_info = self._get_rew(x_velocity, action)
         terminated = self.terminated
         info = {
-            "reward_survive": healthy_reward,
-            "reward_forward": forward_reward,
-            "reward_ctrl": -ctrl_cost,
-            "reward_contact": -contact_cost,
             "x_position": self.data.qpos[0],
             "y_position": self.data.qpos[1],
             "tendon_lenght": self.data.ten_length,
@@ -521,12 +508,33 @@
             "distance_from_origin": np.linalg.norm(self.data.qpos[0:2], ord=2),
             "x_velocity": x_velocity,
             "y_velocity": y_velocity,
+            **reward_info,
         }
 
         if self.render_mode == "human":
             self.render()
         return observation, reward, terminated, False, info
 
+    def _get_rew(self, x_velocity: float, action):
+        forward_reward = self._forward_reward_weight * x_velocity
+        healthy_reward = self.healthy_reward
+        rewards = forward_reward + healthy_reward
+
+        ctrl_cost = self.control_cost(action)
+        contact_cost = self.contact_cost
+        costs = ctrl_cost + contact_cost
+
+        reward = rewards - costs
+
+        reward_info = {
+            "reward_survive": healthy_reward,
+            "reward_forward": forward_reward,
+            "reward_ctrl": -ctrl_cost,
+            "reward_contact": -contact_cost,
+        }
+
+        return reward, reward_info
+
     def reset_model(self):
         noise_low = -self._reset_noise_scale
         noise_high = self._reset_noise_scale

diff --git a/gymnasium/envs/mujoco/humanoidstandup_v5.py b/gymnasium/envs/mujoco/humanoidstandup_v5.py
index 46e591ac8..99b35cc50 100644
--- a/gymnasium/envs/mujoco/humanoidstandup_v5.py
+++ b/gymnasium/envs/mujoco/humanoidstandup_v5.py
@@ -444,6 +444,21 @@ def step(self, action):
         self.do_simulation(action, self.frame_skip)
         pos_after = self.data.qpos[2]
 
+        reward, reward_info = self._get_rew(pos_after, action)
+        info = {
+            "x_position": self.data.qpos[0],
+            "y_position": self.data.qpos[1],
+            "z_distance_from_origin": self.data.qpos[2] - self.init_qpos[2],
+            "tendon_lenght": self.data.ten_length,
+            "tendon_velocity": self.data.ten_velocity,
+            **reward_info,
+        }
+
+        if self.render_mode == "human":
+            self.render()
+        return self._get_obs(), reward, False, False, info
+
+    def _get_rew(self, pos_after: float, action):
         uph_cost = (pos_after - 0) / self.model.opt.timestep
 
         quad_ctrl_cost = self._ctrl_cost_weight * np.square(self.data.ctrl).sum()
@@ -455,20 +470,14 @@
         quad_impact_cost = np.clip(quad_impact_cost, min_impact_cost, max_impact_cost)
 
         reward = uph_cost - quad_ctrl_cost - quad_impact_cost + 1
-        info = {
+
+        reward_info = {
             "reward_linup": uph_cost,
             "reward_quadctrl": -quad_ctrl_cost,
             "reward_impact": -quad_impact_cost,
-            "x_position": self.data.qpos[0],
-            "y_position": self.data.qpos[1],
-            "z_distance_from_origin": self.data.qpos[2] - self.init_qpos[2],
-            "tendon_lenght": self.data.ten_length,
-            "tendon_velocity": self.data.ten_velocity,
         }
 
-        if self.render_mode == "human":
-            self.render()
-        return self._get_obs(), reward, False, False, info
+        return reward, reward_info
 
     def reset_model(self):
         noise_low = -self._reset_noise_scale
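The per-term breakdown still reaches users through info, now spliced in via **reward_info rather than written inline. A small sketch that cross-checks the decomposition on Humanoid-v5 — the cost terms are stored already negated, so the four components should sum back to the scalar reward, up to floating-point rounding (key names taken from the diff above):

    import gymnasium as gym

    env = gym.make("Humanoid-v5")
    env.reset(seed=0)
    _, reward, _, _, info = env.step(env.action_space.sample())
    parts = ("reward_forward", "reward_survive", "reward_ctrl", "reward_contact")
    # Both prints should agree to within float rounding.
    print(reward)
    print(sum(info[k] for k in parts))
    env.close()
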
diff --git a/gymnasium/envs/mujoco/inverted_double_pendulum_v5.py b/gymnasium/envs/mujoco/inverted_double_pendulum_v5.py
index dfa190ca2..bf2835577 100644
--- a/gymnasium/envs/mujoco/inverted_double_pendulum_v5.py
+++ b/gymnasium/envs/mujoco/inverted_double_pendulum_v5.py
@@ -194,27 +194,32 @@ def __init__(
     def step(self, action):
         self.do_simulation(action, self.frame_skip)
 
+        x, _, y = self.data.site_xpos[0]
         observation = self._get_obs()
+        terminated = bool(y <= 1)
+        reward, reward_info = self._get_rew(x, y, terminated)
 
-        x, _, y = self.data.site_xpos[0]
-        v1, v2 = self.data.qvel[1:3]
+        info = reward_info
 
-        terminated = bool(y <= 1)
+        if self.render_mode == "human":
+            self.render()
+        return observation, reward, terminated, False, info
 
+    def _get_rew(self, x, y, terminated):
+        v1, v2 = self.data.qvel[1:3]
         dist_penalty = 0.01 * x**2 + (y - 2) ** 2
         vel_penalty = 1e-3 * v1**2 + 5e-3 * v2**2
         alive_bonus = self._healthy_reward * int(not terminated)
+
         reward = alive_bonus - dist_penalty - vel_penalty
 
-        info = {
+        reward_info = {
             "reward_survive": alive_bonus,
             "distance_penalty": -dist_penalty,
             "velocity_penalty": -vel_penalty,
         }
 
-        if self.render_mode == "human":
-            self.render()
-        return observation, reward, terminated, False, info
+        return reward, reward_info
 
     def _get_obs(self):
         return np.concatenate(

diff --git a/gymnasium/envs/mujoco/pusher_v5.py b/gymnasium/envs/mujoco/pusher_v5.py
index 99d4eabf0..490c4b016 100644
--- a/gymnasium/envs/mujoco/pusher_v5.py
+++ b/gymnasium/envs/mujoco/pusher_v5.py
@@ -220,6 +220,16 @@ def __init__(
         }
 
     def step(self, action):
+        reward, reward_info = self._get_rew(action)
+        self.do_simulation(action, self.frame_skip)
+
+        observation = self._get_obs()
+        info = reward_info
+        if self.render_mode == "human":
+            self.render()
+        return observation, reward, False, False, info
+
+    def _get_rew(self, action):
         vec_1 = self.get_body_com("object") - self.get_body_com("tips_arm")
         vec_2 = self.get_body_com("object") - self.get_body_com("goal")
 
@@ -227,18 +237,15 @@
         reward_near = -np.linalg.norm(vec_1) * self._reward_near_weight
         reward_dist = -np.linalg.norm(vec_2) * self._reward_dist_weight
         reward_ctrl = -np.square(action).sum() * self._reward_control_weight
 
-        self.do_simulation(action, self.frame_skip)
-
-        observation = self._get_obs()
         reward = reward_dist + reward_ctrl + reward_near
-        info = {
+
+        reward_info = {
             "reward_dist": reward_dist,
             "reward_ctrl": reward_ctrl,
             "reward_near": reward_near,
         }
-        if self.render_mode == "human":
-            self.render()
-        return observation, reward, False, False, info
+
+        return reward, reward_info
 
     def reset_model(self):
         qpos = self.init_qpos

diff --git a/gymnasium/envs/mujoco/reacher_v5.py b/gymnasium/envs/mujoco/reacher_v5.py
index e8ba867d1..db23e1961 100644
--- a/gymnasium/envs/mujoco/reacher_v5.py
+++ b/gymnasium/envs/mujoco/reacher_v5.py
@@ -197,21 +197,28 @@ def __init__(
         }
 
     def step(self, action):
+        reward, reward_info = self._get_rew(action)
+        self.do_simulation(action, self.frame_skip)
+
+        observation = self._get_obs()
+        info = reward_info
+        if self.render_mode == "human":
+            self.render()
+        return observation, reward, False, False, info
+
+    def _get_rew(self, action):
         vec = self.get_body_com("fingertip") - self.get_body_com("target")
         reward_dist = -np.linalg.norm(vec) * self._reward_dist_weight
         reward_ctrl = -np.square(action).sum() * self._reward_control_weight
 
-        self.do_simulation(action, self.frame_skip)
-
-        observation = self._get_obs()
         reward = reward_dist + reward_ctrl
-        info = {
+
+        reward_info = {
             "reward_dist": reward_dist,
             "reward_ctrl": reward_ctrl,
         }
-        if self.render_mode == "human":
-            self.render()
-        return observation, reward, False, False, info
+
+        return reward, reward_info
 
     def reset_model(self):
         qpos = (

diff --git a/gymnasium/envs/mujoco/swimmer_v5.py b/gymnasium/envs/mujoco/swimmer_v5.py
index c49267bce..a231cc627 100644
--- a/gymnasium/envs/mujoco/swimmer_v5.py
+++ b/gymnasium/envs/mujoco/swimmer_v5.py
@@ -234,20 +234,15 @@ def step(self, action):
         xy_velocity = (xy_position_after - xy_position_before) / self.dt
         x_velocity, y_velocity = xy_velocity
 
-        forward_reward = self._forward_reward_weight * x_velocity
-
-        ctrl_cost = self.control_cost(action)
-
         observation = self._get_obs()
-        reward = forward_reward - ctrl_cost
+        reward, reward_info = self._get_rew(x_velocity, action)
         info = {
-            "reward_forward": forward_reward,
-            "reward_ctrl": -ctrl_cost,
             "x_position": xy_position_after[0],
             "y_position": xy_position_after[1],
             "distance_from_origin": np.linalg.norm(xy_position_after, ord=2),
             "x_velocity": x_velocity,
             "y_velocity": y_velocity,
+            **reward_info,
         }
 
         if self.render_mode == "human":
@@ -255,6 +250,19 @@
 
         return observation, reward, False, False, info
 
+    def _get_rew(self, x_velocity: float, action):
+        forward_reward = self._forward_reward_weight * x_velocity
+        ctrl_cost = self.control_cost(action)
+
+        reward = forward_reward - ctrl_cost
+
+        reward_info = {
+            "reward_forward": forward_reward,
+            "reward_ctrl": -ctrl_cost,
+        }
+
+        return reward, reward_info
+
     def _get_obs(self):
         position = self.data.qpos.flatten()
         velocity = self.data.qvel.flatten()

diff --git a/gymnasium/envs/mujoco/walker2d_v5.py b/gymnasium/envs/mujoco/walker2d_v5.py
index dde3e9b03..555ca4944 100644
--- a/gymnasium/envs/mujoco/walker2d_v5.py
+++ b/gymnasium/envs/mujoco/walker2d_v5.py
@@ -310,24 +310,14 @@ def step(self, action):
         x_position_after = self.data.qpos[0]
         x_velocity = (x_position_after - x_position_before) / self.dt
 
-        ctrl_cost = self.control_cost(action)
-
-        forward_reward = self._forward_reward_weight * x_velocity
-        healthy_reward = self.healthy_reward
-
-        rewards = forward_reward + healthy_reward
-        costs = ctrl_cost
-
         observation = self._get_obs()
-        reward = rewards - costs
+        reward, reward_info = self._get_rew(x_velocity, action)
         terminated = self.terminated
         info = {
-            "reward_forward": forward_reward,
-            "reward_ctrl": -ctrl_cost,
-            "reward_survive": healthy_reward,
             "x_position": x_position_after,
             "z_distance_from_origin": self.data.qpos[1] - self.init_qpos[1],
             "x_velocity": x_velocity,
+            **reward_info,
         }
 
         if self.render_mode == "human":
@@ -335,6 +325,23 @@
 
         return observation, reward, terminated, False, info
 
+    def _get_rew(self, x_velocity: float, action):
+        forward_reward = self._forward_reward_weight * x_velocity
+        healthy_reward = self.healthy_reward
+        rewards = forward_reward + healthy_reward
+
+        ctrl_cost = self.control_cost(action)
+        costs = ctrl_cost
+        reward = rewards - costs
+
+        reward_info = {
+            "reward_forward": forward_reward,
+            "reward_ctrl": -ctrl_cost,
+            "reward_survive": healthy_reward,
+        }
+
+        return reward, reward_info
+
     def reset_model(self):
         noise_low = -self._reset_noise_scale
         noise_high = self._reset_noise_scale
From 650aa5f49baa21df9100169302287c5900b32204 Mon Sep 17 00:00:00 2001
From: Mark Towers <mark.m.towers@gmail.com>
Date: Fri, 8 Dec 2023 12:46:40 +0000
Subject: [PATCH 3/4] Add more introductory pages (#791)

---
 .../environment-creation-example-episode.gif  | Bin 0 -> 41387 bytes
 docs/api/experimental/functional.md           |   0
 docs/api/functional.md                        |  13 +-
 docs/environments/third_party_environments.md |   2 +
 docs/index.md                                 |   6 +-
 docs/introduction/basic_usage.md              |  71 +++---
 docs/introduction/create_custom_env.md        | 227 ++++++++++++++++++
 ...{migration-guide.md => migration_guide.md} |   5 +-
 docs/introduction/record_agent.md             |  96 ++++++++
 docs/introduction/speed_up_env.md             |  32 +++
 docs/introduction/train_agent.md              | 165 +++++++++++++
 .../gymnasium_basics/environment_creation.py  |  30 ++-
 .../implementing_custom_wrappers.py           |   1 +
 gymnasium/wrappers/stateful_observation.py    |   2 +-
 gymnasium/wrappers/vector/common.py           |  10 +-
 15 files changed, 595 insertions(+), 65 deletions(-)
 create mode 100644 docs/_static/videos/tutorials/environment-creation-example-episode.gif
 delete mode 100644 docs/api/experimental/functional.md
 create mode 100644 docs/introduction/create_custom_env.md
 rename docs/introduction/{migration-guide.md => migration_guide.md} (97%)
 create mode 100644 docs/introduction/record_agent.md
 create mode 100644 docs/introduction/speed_up_env.md
 create mode 100644 docs/introduction/train_agent.md

diff --git a/docs/_static/videos/tutorials/environment-creation-example-episode.gif b/docs/_static/videos/tutorials/environment-creation-example-episode.gif
new file mode 100644
index 0000000000000000000000000000000000000000..c8a5da85b64181cbc80b727822c40c9b5164241d
GIT binary patch
literal 41387
[41387 bytes of base85-encoded GIF data truncated]
zJqe`4;_X^rBD*3h`ofr9JC)a%ke>m_shcHaesihSxZMR8#Bo~TW2K`7($JpG)<({; zA`ldKZ+@JA0%ttZy!LkAHmrJ5zc2-PPzH_zOOZzDe5@X+BOR_xl7A{V*yLV>(jU%r zTz;>7^y7ovtnE^l{Z%f3p2h2-lBfsxig~FnI_>^k&olEfJ*<-BC2#F9PaCn{M3fRr z{b|=%2Fr!YRv6)p!<j`|AxZS@XAWI;Zo`P&_9}<fs<0~7k<0H^%P4WqTK&M*{ly3l zqHKd_vU|IJ5%OL?9=HBH97IOko_CJd_f1Y$JU1-J|70G%t(d&J>>lh|pK=hAWij#h z&FRct;<eDlT`XKxhry3M_?JNWoGTZyc_k9?QR{o?Jk5@+-KraC&gYXqF<l5g+!q-| zqm#4#=35*+T{vUwg<XsUp_!@aEix7vudwY6g-EGp8pAwPWN1^hk_gyB9{<>}PZt!7 z*>u!IJt{FIKl9qmr2KecpKR1XuN=e!Mmr?Ma>TuVdyVu@OM8_(zm`20RgwS7zCi$L z!RAK)GE+zXf~JrH-eQ+8+>?QYsYb;<%YNs!nP=-RUCC6Oai(4t5=1pL`Tk`vu}i61 zCul((!C7izl598Vl2`oe)^Q9~4-NoURm|XwN0n`TLXd<B$U-gqmJZH%B$zF1+%1YT z9{r1t_rLbiaV18DKlmbW`<_iv3joPgwc_1xOrf3AV24_z&^r&@#Q^wf$I2I3yS3=U zHZl<BeL6icF+!$mF?Q`ok71q6CN6eyCkyzX&(ztjzXo}U$ca0z8QZD*s0<T-VR(s7 z+#kgqw+q3~2lP$`xo`E}vY!(eJt~nW+dNd%z5@h(rM?nIkKHG#KkEAkw7>sV#aGdD zOu&#uxhLG4`R$ZRnW9vmeR}2DqC45r2$)5PC3KQUS6^J>?dUDW_uG9wYibpm2<wLL z&q6-<t@GCgdAD1n<oT{<gye%Pl2a4?wlaK|r8OOua5cza&nKs=0@xtf+HX>{cX!f5 z3iSR9vcGbm8`l??hbH(x{h)X|->#0GBGi4p7I!7d|4c7pNn9RN@ERR`KiTo;6kU)v zaB!c8Oa5eyO*E;G%9Jg5D>{Pp&Xmi;kS9Yy>B)M_jRE#I`}NqA+Z$)D?to6HF5qf~ z=!x?6p%$JK5guJmtj}g#u(*}&h30n6M-d&rrj6C_4gIqB`b0Tcm<kEH&%45H-6K0* zfqDo)bq`Q550LCy+F`WfUx(vpCiM4(&{Tq$IW8*BAdaJ3@*UU}NxjF7(7<?&<9$37 zCQ2)BzL(rWm2rD4qV9{`y{49K){k2;`w@236=#%O$yBd9b%{H3=Q`LCsg5R(ZMAdz zx`o&4B7D&sG+$=EM7Inly$Q0C-~@tN+zg`dor~znw8bNYe+t=5a<HC*2DN>T0|x4{ zXF>-CT8!(1{P&9oC8X6N=kQ5T)VZTi{tO_WH6&YMHfSSCJD+xn1UR61rbEM0QS%#t z3Cu{0J>h2^DvGRujAGUYL_#r7g>#I_HECElNvxB>1|r?FgN3c1ZQX0V$w>%gKA3AU zKRJNT6Yz4m@zR{+jC&mgK`egX?RyIzd1s8@%jIm6UTXPgBk)av!x^Ad8*vSU0rR%O zq3o@6(%<MvdE%whv6{FKoYYZ!_&8z1XvxK}HOPs7Vc^e5szY_*J4-G;KEL}(nqGVf zb~^(r4DF%fk03P6Oq5YE@<9OnSSVSgrjmmm0i@WOlF6bQK`Rz}H!SwFhqp9MO2+NU zhbRWOexvE8Ixx!1Qui6>xqO>3jNm^S9c8#iqRziHI#h6%kbjx!a6OmEi#Iq65;f5c zW32dh%_ugJc;?-@BIR^ERa$j;8rNVPv36yUcH_2s28-oF#(YEJr#oi=ya%Mj7UL*( zU%^ZZW4`_nqaq#e-G*Io^f{1|AcL3^2bK+wVqr=Yf^ic=DKLVtgXw0%4?6j}zAgrp zczHAAaa2aew*P>7nOmF+3zW!4B83{bo+ozw&@5H(f4$j3K=<YE=>sERO{7|aCZR9` zyS58MRWPKvX|J_PviZ}wxx3qV3mM-G^VRmy?B@umWA)4mRy{0zi&fV`*tXu`0p9Mi z?2E~@xv=}I1{2(>fz4zww-ontI>x(iz$Pi>5-tuEN6Wg1`<d<E(0IxZS4UU*QKx#Q znaiPb-P5aKe{rtMP+F>;LU*a}M7f^CX8abGVQc+s+J4r!bp8i{tyw>Apref&KSfro zV!(%4)~7$aK?M$yJw#RsWZJRJQM6}Hi}6fAFS=OuHqA-JC%n#fwxqu_Su`~?8BiL; zVI@nRI=penfY;*E#Tm9ewB}$O+|Dj8BEQw%f1M-?;ac_S!Z6WuhM&#BXNW#iqfb10 zs~`*y$r}2C*2P@i5pY@N4~W{_G+<?fIH|dqYJV?@7u1GXSqo;S7e{;WSE`&bmpkS< zl@64~#dVWpBOa~a3cRXDFUSg&;BiMh^MpGN(nJ&zTCwSJoVx7WekK(Xs0m9YoXZSm zwR&MyuTklm<45JSSn&}#e~?=)Dlljo<ec#`zi|Pwm%lgEVx-Vc*jI#p!if)R^0y|d zd?%8rb5tLSK5_D`-fiDk!MpR|A$kNbdEE8)trUNE5hHW;yBmuExA)(Dt`Gb}<uua` zeq>>42J29XhiwoZ;h8U#FBvda&mqh%s^Yqf@4v}hWkeQG_vCqobnEdMRY|D~dY1G4 zg(8}nLxjDjo<5)@n1CVFu`^hY{KG-tw9AtyH6hC^7hSezW3diG0a6RjhB`1sg`vg$ zWw#ig-A`I6x5BndwhO+C9VoYs%6`*`$8(E{BGZRMf`NZqHUlfw&ut^IEaFR{y6~~d zTyuK$vSeRYo(gX$NzNE@@0*MfV*6D<>eAm6nviIrC#T=r7xP2G2{;G*&=)ep>z{o^ zRYqt`bB(Wsg@WkSK85IJN9i>9)>c*N&!E`KY&u~+q}b%~r?dI5O~xG^YRflQZF~q$ z@Ov6P|DWev1zoCZazTpYgMPqE!rt1j(8G{&ot^+fkaA@)kIg;k^6iLp{e-xYtXVhw zdWlZyf<-Ji39zEtS=v<thlyfT*+U#Z(xUT^!tGqBuAb5}BFQn~V(LiM_;mT(4-{c9 zt=7nHJ!2o3lW=>k{jffK8dW{##EVlVp{U~=D6b#g4>Kp7(2i{>iwVL}5Xi}ag8ZME zY3m%8ACRFbL4Eo6j01rrgG8AF-6rEH;xd?7YHj+jjLrm{t5l?35Nvg5Q+*<SwKn@U zalHcTM3hzJhHj^c47W7!h00|H_oq1+4vv(jSIpINNL=Xg^==4X-qO<yyA*90kK<VD zELW4PLJNTkzZ!ne9vVdT*?No5a*(=|kb%5fBPCnNf8(o!pC*S?<U?x}=4;&IP>YVV zAMo0)J1ZX&t=Z<=%0Z@_5w`0%oN&x6&`Fv$QW;)GrRoX}y;R*P_Y{n@cd;_8qU3sG zz_#8ecP?LQ<p|er^)nFtmKZxgdCFkT8U<~;#=K|r9R&w$a)#jkam@#jQ4+*TXHHs- z&nxn5h09H2gbNfU(?;5GkV-mhOri98@#C5)-n}J?<TZ(UC=kt_*H-c8P@)ti4MIi& 
zE5Urg|A;63zcU!us{8jDyn~y;&#A;o)%lH}s>%P%V3KiYe*~!z>ajMM6O{T7$XjmQ zMkN!^eIhST%rO*k8&1knIffl6A{Wnnv~!<QtgIUWNRS`fRxZKI;<S$QD4(rWhlW3D zYjPcF3<Z{CGSbXVRXF4ERKzwOD(8b#Tz+4YxvtiR1Tu2JGDB&75<I~3`0)`J3r@}l zG*qO!+Z5VLzB+@nG^(_|T2y5{khNHFiaYk%ZfR}WD`Zlfd=_B%XtzJ{P5V8shssAi z(C1{c!hyv<8qd@2kp7<e;I`a0NhlXMWk32^t)6U}HRDR@=&M%eEgCD`{AawCOU>7* zb!W8T%f=fDrSU@ucI-qgJ20>8`nGrcWoj@1kbc_!5eea1IQ$denl&Zq({Ya^MJz3D z(~ahMUBPmi|C}8_%lSN5k$yt?P04Id{1y^xmjL6Kd#*+*;lyRZ3$%a!n$~e=BR+1x zYXf#~djJR(z9}#EH`9+~NHvP*dH*i%Q5MFDNwC5yOL@d^J%%${8<jJk1e#Jl`1y1! z?BV)MX@I!uti8JNj6g;zfCssSxWrRgR`VfbTrA9g!Oc(*o2E&l2JNQf&mfS3n5FA@ zz@4IU2-8#Hi*Fb5%WRYV`OF=cv$&nLOE0&faTtb)qLhhSb7L`68<$s!6T;NY)!tA1 z_G-a$nngt&sy}RB(%^aWgk=G)=%dX!H+briS=x?ev%$F{Iaa$-+ZAu4_exvb&vspk zzmh`olmg`UMA;8Z$f_(^Q?WJeap<Dy^k13+i+11eCig!niJBRw)4UL>Y$6lF9DkvB z6H}@OxeF_(+^lI4Oe_VQxO9CQq801zIwSHJK%9a+P;B|G*j|ypw4tT}l^wqjpldiy z`N}EgWb`Fx3Dsu?&u53@jC9Lyff22Vzlu#4f=<#z6x^yU*9<=J0R1cv9wNC0T}?b7 z&lhMrtY0nGEl@VH)GxY+5T6;S6Db@ngP8IktpqCEe*LX$^ZZl#TvOINfewA5VCYLr zCIqs8NTBm#NhE2xW&?uw&^Ws2(dpCl#(WQ{GUVp!yHa+P)K;lyF7hX203s~4J;qbC zw^x^-rx2zcoY)>DHmNQU+<Syr?MF1<3s@SAp;%JJ3M7fI26*NPO3;{G1-|+|Z_@I! zV~_dIS<QI1&BofO#+c1Q`l@ZN#C$|=<C>Bq(?TZ=O~45TU!QY}n<Md5Pv+BflYq<@ zkJX@X^DO)66y;E`ZD>zK=cOD^IB=8vT>$j{HuX^$>c!N>8CGe_zeZM5g$B=@UooW~ ztH{7?n!F;RfVm5Wh$V1blXArosYW}`Gm+y=z_Dd;OH2%nd^WKHSQY4F5HacrVt4Tq zpX5lAQ<*ZoayM5G^P)6qmrM$2A^v>qR~!zhWcQ3jYm|A5t4by;3=ujq!teDar?!C} zT={+wHQT4VIkpWWZ4oTHHhvtYFvCRlOG7XVBWz|>tQKk{lP1tlC+jsqd8Hw!9WZV; z%*a50Afq#`zLk+NSfG5I0~2OV2pLY`@mbl>QF1Kd1eF(mqwp5LnF8x~gmd644i3o5 zlTn_79C+u;MY}V8Ceaja+Kuh-mKjZ`Cvm26Ml^rnWX0qt4o!Tnv{!#$oM!i>n2PPT zSwG@>Lh?Wkqs(7HTMmkn3&vNcr1aGU`rCP?yG14yqd}NldMbL1+?EyKg~3mTLH-d8 v)SMV%ly>bQ!0P|f#$ZZ2mK=?uN`-?={re;98n_v#V45nIdU$sU?kM~h<LCt+ literal 0 HcmV?d00001 diff --git a/docs/api/experimental/functional.md b/docs/api/experimental/functional.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/docs/api/functional.md b/docs/api/functional.md index 5af602560..269acf108 100644 --- a/docs/api/functional.md +++ b/docs/api/functional.md @@ -10,25 +10,26 @@ title: Functional .. automethod:: gymnasium.functional.FuncEnv.transform .. automethod:: gymnasium.functional.FuncEnv.initial - .. automethod:: gymnasium.functional.FuncEnv.initial_info .. automethod:: gymnasium.functional.FuncEnv.transition .. automethod:: gymnasium.functional.FuncEnv.observation .. automethod:: gymnasium.functional.FuncEnv.reward .. automethod:: gymnasium.functional.FuncEnv.terminal + + .. automethod:: gymnasium.functional.FuncEnv.state_info .. automethod:: gymnasium.functional.FuncEnv.transition_info + .. automethod:: gymnasium.functional.FuncEnv.render_init .. automethod:: gymnasium.functional.FuncEnv.render_image - .. automethod:: gymnasium.functional.FuncEnv.render_initialise .. automethod:: gymnasium.functional.FuncEnv.render_close ``` ## Converting Jax-based Functional environments to standard Env ```{eval-rst} -.. autoclass:: gymnasium.utils.functional_jax_env.FunctionalJaxEnv +.. autoclass:: gymnasium.envs.functional_jax_env.FunctionalJaxEnv - .. automethod:: gymnasium.utils.functional_jax_env.FunctionalJaxEnv.reset - .. automethod:: gymnasium.utils.functional_jax_env.FunctionalJaxEnv.step - .. automethod:: gymnasium.utils.functional_jax_env.FunctionalJaxEnv.render + .. automethod:: gymnasium.envs.functional_jax_env.FunctionalJaxEnv.reset + .. automethod:: gymnasium.envs.functional_jax_env.FunctionalJaxEnv.step + .. 
```

diff --git a/docs/environments/third_party_environments.md b/docs/environments/third_party_environments.md
index de7eef747..84b757648 100644
--- a/docs/environments/third_party_environments.md
+++ b/docs/environments/third_party_environments.md
@@ -13,6 +13,8 @@ multi-objective RL ([MO-Gymnasium](https://mo-gymnasium.farama.org/))
 many-agent RL ([MAgent2](https://magent2.farama.org/)),
 3D navigation ([Miniworld](https://miniworld.farama.org/)), and many more.

+## Third-party environments with Gymnasium
+
 *This page contains environments which are not maintained by Farama Foundation and, as such, cannot be guaranteed to function as intended.*

 *If you'd like to contribute an environment, please reach out on [Discord](https://discord.gg/bnJ6kubTg6).*
diff --git a/docs/index.md b/docs/index.md
index 7d75ef4e6..4cb7697dc 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -47,8 +47,12 @@ env.close()
 :caption: Introduction

 introduction/basic_usage
+introduction/train_agent
+introduction/create_custom_env
+introduction/record_agent
+introduction/speed_up_env
 introduction/gym_compatibility
-introduction/migration-guide
+introduction/migration_guide
 ```

 ```{toctree}
diff --git a/docs/introduction/basic_usage.md b/docs/introduction/basic_usage.md
index 77319eb9c..dec6861d2 100644
--- a/docs/introduction/basic_usage.md
+++ b/docs/introduction/basic_usage.md
@@ -9,9 +9,9 @@ firstpage:

 ```{eval-rst}
 .. py:currentmodule:: gymnasium

-Gymnasium is a project that provides an API for all single agent reinforcement learning environments, and includes implementations of common environments: cartpole, pendulum, mountain-car, mujoco, atari, and more.
+Gymnasium is a project that provides an API (application programming interface) for all single-agent reinforcement learning environments, with implementations of common environments: cartpole, pendulum, mountain-car, mujoco, atari, and more. This page will outline the basics of how to use Gymnasium, including its four key functions: :meth:`make`, :meth:`Env.reset`, :meth:`Env.step` and :meth:`Env.render`.

-The API contains four key functions: :meth:`make`, :meth:`Env.reset`, :meth:`Env.step` and :meth:`Env.render`, that this basic usage will introduce you to. At the core of Gymnasium is :class:`Env`, a high-level python class representing a markov decision process (MDP) from reinforcement learning theory (this is not a perfect reconstruction, and is missing several components of MDPs). Within gymnasium, environments (MDPs) are implemented as :class:`Env` classes, along with :class:`Wrapper`, provide helpful utilities to change actions passed to the environment and modified the observations, rewards, termination or truncations conditions passed back to the user.
+At the core of Gymnasium is :class:`Env`, a high-level python class representing a Markov decision process (MDP) from reinforcement learning theory (note: this is not a perfect reconstruction, and is missing several components of MDPs). The class provides users the ability to generate an initial state, transition to new states given an action, and visualise the environment. Alongside :class:`Env`, :class:`Wrapper` classes are provided to help augment / modify the environment, in particular, the agent's observations, rewards and actions.
 ```

 ## Initializing Environments
@@ -30,12 +30,12 @@ env = gym.make('CartPole-v1')

 ```{eval-rst}
 .. py:currentmodule:: gymnasium

-This will return an :class:`Env` for users to interact with. To see all environments you can create, use :meth:`pprint_registry`. Furthermore, :meth:`make` provides a number of additional arguments for specifying keywords to the environment, adding more or less wrappers, etc.
+This function will return an :class:`Env` for users to interact with. To see all environments you can create, use :meth:`pprint_registry`. Furthermore, :meth:`make` provides a number of additional arguments for specifying keywords to the environment, adding more or fewer wrappers, etc. See :meth:`make` for more information.
 ```
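+As a small illustrative sketch (``CartPole-v1`` ships with Gymnasium and is assumed to be registered by default):
+
+```python
+import gymnasium as gym
+
+# Keyword arguments after the environment ID are forwarded to the environment
+env = gym.make("CartPole-v1", render_mode="rgb_array")
+
+# Print the IDs of all registered environments
+gym.pprint_registry()
+```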
 ## Interacting with the Environment

-The classic "agent-environment loop" pictured below is simplified representation of reinforcement learning that Gymnasium implements.
+Within reinforcement learning, the classic "agent-environment loop" pictured below is a simplified representation of how an agent and environment interact with each other. The agent receives an observation of the environment, then selects an action, which the environment uses to determine the reward and the next observation. The cycle then repeats until the environment ends (terminates).

 ```{image} /_static/diagrams/AE_loop.png
 :width: 50%
 :align: center
 :class: only-light
 ```

 ```{image} /_static/diagrams/AE_loop_dark.png
 :width: 50%
 :align: center
 :class: only-dark
 ```

-This loop is implemented using the following gymnasium code
+For gymnasium, the "agent-environment loop" is implemented below for a single episode (until the environment ends). See the next section for a line-by-line explanation. Note that running this code requires installing swig (`pip install swig` or [download](https://www.swig.org/download.html)) along with `pip install gymnasium[box2d]`.

 ```python
 import gymnasium as gym
+
 env = gym.make("LunarLander-v2", render_mode="human")
 observation, info = env.reset()

-for _ in range(1000):
+episode_over = False
+while not episode_over:
     action = env.action_space.sample()  # agent policy that uses the observation and info
     observation, reward, terminated, truncated, info = env.step(action)

-    if terminated or truncated:
-        observation, info = env.reset()
+    episode_over = terminated or truncated

 env.close()
 ```

 The output should look something like this:

 ```{eval-rst}
 .. py:currentmodule:: gymnasium

-First, an environment is created using :meth:`make` with an additional keyword ``"render_mode"`` that specifies how the environment should be visualised.
-
-.. py:currentmodule:: gymnasium.Env
+First, an environment is created using :meth:`make` with an additional keyword ``"render_mode"`` that specifies how the environment should be visualised. See :meth:`Env.render` for details on the default meaning of different render modes. In this example, we use the ``"LunarLander"`` environment where the agent controls a spaceship that needs to land safely.

-See :meth:`render` for details on the default meaning of different render modes. In this example, we use the ``"LunarLander"`` environment where the agent controls a spaceship that needs to land safely.
+After initializing the environment, we :meth:`Env.reset` the environment to get the first observation of the environment along with additional information. To initialize the environment with a particular random seed or options (see the environment documentation for possible values), use the ``seed`` or ``options`` parameters with :meth:`reset`.

-After initializing the environment, we :meth:`reset` the environment to get the first observation of the environment. For initializing the environment with a particular random seed or options (see environment documentation for possible values) use the ``seed`` or ``options`` parameters with :meth:`reset`.

+As we wish to continue the agent-environment loop until the environment ends, after an unknown number of timesteps, we define ``episode_over`` as a variable to know when to stop interacting with the environment, along with a while loop that uses it.

-Next, the agent performs an action in the environment, :meth:`step`, this can be imagined as moving a robot or pressing a button on a games' controller that causes a change within the environment. As a result, the agent receives a new observation from the updated environment along with a reward for taking the action. This reward could be for instance positive for destroying an enemy or a negative reward for moving into lava. One such action-observation exchange is referred to as a **timestep**.
+Next, the agent performs an action in the environment. :meth:`Env.step` executes the selected action (in this case random, via ``env.action_space.sample()``) to update the environment. This action can be imagined as moving a robot or pressing a button on a game's controller that causes a change within the environment. As a result, the agent receives a new observation from the updated environment along with a reward for taking the action. This reward could, for instance, be positive for destroying an enemy, or negative for moving into lava. One such action-observation exchange is referred to as a **timestep**.

-However, after some timesteps, the environment may end, this is called the terminal state. For instance, the robot may have crashed, or the agent have succeeded in completing a task, the environment will need to stop as the agent cannot continue. In gymnasium, if the environment has terminated, this is returned by :meth:`step`. Similarly, we may also want the environment to end after a fixed number of timesteps, in this case, the environment issues a truncated signal. If either of ``terminated`` or ``truncated`` are ``True`` then :meth:`reset` should be called next to restart the environment.
+However, after some timesteps the environment may end; this is called the terminal state. For instance, the robot may have crashed or succeeded in completing a task, and the environment needs to stop as the agent cannot continue. In gymnasium, if the environment has terminated, this is returned by :meth:`step` as the third variable, ``terminated``. Similarly, we may also want the environment to end after a fixed number of timesteps; in this case, the environment issues a truncated signal. If either ``terminated`` or ``truncated`` is ``True``, we end the episode; in most cases, users will then wish to restart the environment, which can be done with ``env.reset()``.
 ```

 ## Action and observation spaces

 ```{eval-rst}
 .. py:currentmodule:: gymnasium.Env

-Every environment specifies the format of valid actions and observations with the :attr:`action_space` and :attr:`observation_space` attributes. This is helpful for both knowing the expected input and output of the environment as all valid actions and observation should be contained with the respective space.
-
-In the example, we sampled random actions via ``env.action_space.sample()`` instead of using an agent policy, mapping observations to actions which users will want to make. See one of the agent tutorials for an example of creating and training an agent policy.
-
-.. py:currentmodule:: gymnasium
+Every environment specifies the format of valid actions and observations with the :attr:`action_space` and :attr:`observation_space` attributes. This is helpful for knowing both the expected input and output of the environment, as all valid actions and observations should be contained within their respective spaces. In the example above, we sampled random actions via ``env.action_space.sample()`` instead of using an agent policy that maps observations to actions, which is what users will want to do.

-Every environment should have the attributes :attr:`Env.action_space` and :attr:`Env.observation_space`, both of which should be instances of classes that inherit from :class:`spaces.Space`. Gymnasium has support for a majority of possible spaces users might need:
+Importantly, :attr:`Env.action_space` and :attr:`Env.observation_space` are instances of :class:`Space`, a high-level python class that provides the key functions :meth:`Space.contains` and :meth:`Space.sample`. Gymnasium has support for a wide range of spaces that users might need:

 .. py:currentmodule:: gymnasium.spaces

-- :class:`Box`: describes an n-dimensional continuous space. It's a bounded space where we can define the upper and lower
-  limits which describe the valid values our observations can take.
+- :class:`Box`: describes a bounded space with upper and lower limits of any n-dimensional shape.
 - :class:`Discrete`: describes a discrete space where ``{0, 1, ..., n-1}`` are the possible values our observation or action can take.
   Values can be shifted to ``{a, a+1, ..., a+n-1}`` using an optional argument.
-- :class:`Dict`: represents a dictionary of simple spaces.
-- :class:`Tuple`: represents a tuple of simple spaces.
-- :class:`MultiBinary`: creates an n-shape binary space. Argument n can be a number or a list of numbers.
+- :class:`MultiBinary`: describes a binary space of any n-dimensional shape.
 - :class:`MultiDiscrete`: consists of a series of :class:`Discrete` action spaces with a different number of actions in each element.
+- :class:`Text`: describes a string space with a minimum and maximum length.
+- :class:`Dict`: describes a dictionary of simpler spaces.
+- :class:`Tuple`: describes a tuple of simpler spaces.
+- :class:`Graph`: describes a mathematical graph (network) with interlinking nodes and edges.
+- :class:`Sequence`: describes a variable-length sequence of simpler space elements.

-For example usage of spaces, see their `documentation </api/spaces>`_ along with `utility functions </api/spaces/utils>`_. There are a couple of more niche spaces :class:`Graph`, :class:`Sequence` and :class:`Text`.
+For example usage of spaces, see their `documentation <../api/spaces>`_ along with `utility functions <../api/spaces/utils>`_.
 ```
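+As a small sketch of constructing and sampling spaces (the shapes and bounds here are arbitrary):
+
+```python
+import numpy as np
+from gymnasium import spaces
+
+box = spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)  # continuous, bounded
+discrete = spaces.Discrete(4)                                       # values {0, 1, 2, 3}
+composite = spaces.Dict({"position": box, "button": discrete})      # nested spaces
+
+sample = composite.sample()        # a random valid element
+assert composite.contains(sample)  # samples always lie within the space
+```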
 ## Modifying the environment

 ```{eval-rst}
 .. py:currentmodule:: gymnasium.wrappers

-Wrappers are a convenient way to modify an existing environment without having to alter the underlying code directly. Using wrappers will allow you to avoid a lot of boilerplate code and make your environment more modular. Wrappers can also be chained to combine their effects. Most environments that are generated via ``gymnasium.make`` will already be wrapped by default using the :class:`TimeLimitV0`, :class:`OrderEnforcingV0` and :class:`PassiveEnvCheckerV0`.
+Wrappers are a convenient way to modify an existing environment without having to alter the underlying code directly. Using wrappers will allow you to avoid a lot of boilerplate code and make your environment more modular. Wrappers can also be chained to combine their effects, as sketched below. Most environments that are generated via :meth:`gymnasium.make` will already be wrapped by default using the :class:`TimeLimit`, :class:`OrderEnforcing` and :class:`PassiveEnvChecker` wrappers.

 In order to wrap an environment, you must first initialize a base environment. Then you can pass this environment along with (possibly optional) parameters to the wrapper's constructor:
 ```

@@ -144,10 +140,10 @@ In order to wrap an environment, you must first initialize a base environment. T
 Gymnasium already provides many commonly used wrappers for you. Some examples:

-- :class:`TimeLimitV0`: Issue a truncated signal if a maximum number of timesteps has been exceeded (or the base environment has issued a truncated signal).
-- :class:`ClipActionV0`: Clip the action such that it lies in the action space (of type `Box`).
-- :class:`RescaleActionV0`: Rescale actions to lie in a specified interval
-- :class:`TimeAwareObservationV0`: Add information about the index of timestep to observation. In some cases helpful to ensure that transitions are Markov.
+- :class:`TimeLimit`: Issues a truncated signal if a maximum number of timesteps has been exceeded (or the base environment has issued a truncated signal).
+- :class:`ClipAction`: Clips any action passed to ``step`` such that it lies in the base environment's action space.
+- :class:`RescaleAction`: Applies an affine transformation to the action, linearly scaling it to a new low and high bound for the environment.
+- :class:`TimeAwareObservation`: Adds information about the index of the timestep to the observation. In some cases, this is helpful to ensure that transitions are Markov.
 ```

 For a full list of implemented wrappers in gymnasium, see [wrappers](/api/wrappers).
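+As an illustrative sketch of chaining wrappers (any environment with a ``Box`` action space works here; ``MountainCarContinuous-v0`` is one of the built-in ones):
+
+```python
+import gymnasium as gym
+from gymnasium.wrappers import ClipAction, RescaleAction
+
+# Base environment with a continuous (Box) action space
+env = gym.make("MountainCarContinuous-v0")
+
+# Expose a [-1, 1] action space that is rescaled to the base environment's bounds
+env = RescaleAction(env, min_action=-1.0, max_action=1.0)
+# Clip incoming actions so out-of-range values are still valid
+env = ClipAction(env)
+```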
@@ -167,6 +163,9 @@ If you have a wrapped environment, and you want to get the unwrapped environment
 ## More information

-* [Making a Custom environment using the Gymnasium API](/tutorials/gymnasium_basics/environment_creation/)
-* [Training an agent to play blackjack](/tutorials/training_agents/blackjack_tutorial)
-* [Compatibility with OpenAI Gym](/introduction/gym_compatibility)
+* [Training an agent](train_agent)
+* [Making a Custom Environment](create_custom_env)
+* [Recording an agent's behaviour](record_agent)
+* [Speeding up an Environment](speed_up_env)
+* [Compatibility with OpenAI Gym](gym_compatibility)
+* [Migration Guide for Gym v0.21 to v0.26 and for v1.0.0](migration_guide)
diff --git a/docs/introduction/create_custom_env.md b/docs/introduction/create_custom_env.md
new file mode 100644
index 000000000..3e92b6c08
--- /dev/null
+++ b/docs/introduction/create_custom_env.md
@@ -0,0 +1,227 @@
+---
+layout: "contents"
+title: Create custom env
+---
+
+# Create a Custom Environment
+
+This page provides a short outline of how to create custom environments with Gymnasium; for a more complete tutorial with rendering, see the [full tutorial](../tutorials/gymnasium_basics/environment_creation). Please read [basic usage](basic_usage) before reading this page.
+
+We will implement a very simplistic game, called ``GridWorldEnv``, consisting of a 2-dimensional square grid of fixed size. The agent can move vertically or horizontally between grid cells in each timestep, and the goal of the agent is to navigate to a target on the grid that has been placed randomly at the beginning of the episode.
+
+Basic information about the game:
+- Observations provide the location of the target and agent.
+- There are 4 discrete actions in our environment, corresponding to the movements "right", "up", "left", and "down".
+- The environment ends (terminates) when the agent has navigated to the grid cell where the target is located.
+- The agent is only rewarded when it reaches the target, i.e., the reward is one when the agent reaches the target and zero otherwise.
+
+## Environment `__init__`
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium
+
+Like all environments, our custom environment will inherit from :class:`gymnasium.Env`, which defines the structure of an environment. One of the requirements for an environment is defining the observation and action space, which declare the general set of possible inputs (actions) and outputs (observations) of the environment. As outlined in our basic information about the game, our agent has four discrete actions, therefore we will use the ``Discrete(4)`` space with four options.
+```
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium.spaces
+
+For our observation, there are a couple of options; for this tutorial we will imagine our observation looks like ``{"agent": array([1, 0]), "target": array([0, 3])}`` where the array elements represent the x and y positions of the agent or target. Alternative options for representing the observation are a 2d grid with values representing the agent and target on the grid, or a 3d grid with each "layer" containing only the agent or target information. Therefore, we will declare the observation space as :class:`Dict` with the agent and target spaces being a :class:`Box` allowing array outputs of an int type.
+```
+
+For a full list of possible spaces to use with an environment, see [spaces](../api/spaces).
+
+```python
+from typing import Optional
+import numpy as np
+import gymnasium as gym
+
+
+class GridWorldEnv(gym.Env):
+
+    def __init__(self, size: int = 5):
+        # The size of the square grid
+        self.size = size
+
+        # Define the agent and target location; randomly chosen in `reset` and updated in `step`
+        self._agent_location = np.array([-1, -1], dtype=np.int32)
+        self._target_location = np.array([-1, -1], dtype=np.int32)
+
+        # Observations are dictionaries with the agent's and the target's location.
+        # Each location is encoded as an element of {0, ..., `size`-1}^2
+        self.observation_space = gym.spaces.Dict(
+            {
+                "agent": gym.spaces.Box(0, size - 1, shape=(2,), dtype=int),
+                "target": gym.spaces.Box(0, size - 1, shape=(2,), dtype=int),
+            }
+        )
+
+        # We have 4 actions, corresponding to "right", "up", "left", "down"
+        self.action_space = gym.spaces.Discrete(4)
+        # Dictionary maps the abstract actions to the directions on the grid
+        self._action_to_direction = {
+            0: np.array([1, 0]),  # right
+            1: np.array([0, 1]),  # up
+            2: np.array([-1, 0]),  # left
+            3: np.array([0, -1]),  # down
+        }
+```
+
+## Constructing Observations
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium
+
+Since we will need to compute observations both in :meth:`Env.reset` and :meth:`Env.step`, it is often convenient to have a method ``_get_obs`` that translates the environment's state into an observation. However, this is not mandatory and you can compute the observations in :meth:`Env.reset` and :meth:`Env.step` separately.
+```
+
+```python
+    def _get_obs(self):
+        return {"agent": self._agent_location, "target": self._target_location}
+```
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium
+
+We can also implement a similar method for the auxiliary information that is returned by :meth:`Env.reset` and :meth:`Env.step`. In our case, we would like to provide the manhattan distance between the agent and the target:
+```
+
+```python
+    def _get_info(self):
+        return {
+            "distance": np.linalg.norm(
+                self._agent_location - self._target_location, ord=1
+            )
+        }
+```
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium
+
+Oftentimes, info will also contain some data that is only available inside the :meth:`Env.step` method (e.g., individual reward terms). In that case, we would have to update the dictionary that is returned by ``_get_info`` in :meth:`Env.step`.
+```
+
+## Reset function
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium.Env
+
+The purpose of :meth:`reset` is to initiate a new episode for an environment; it has two parameters: ``seed`` and ``options``. The seed can be used to initialize the random number generator to a deterministic state, and options can be used to specify values used within reset. On the first line of the reset, you need to call ``super().reset(seed=seed)``, which will initialize the random number generator (:attr:`np_random`) to use through the rest of the :meth:`reset`.
+
+Within our custom environment, the :meth:`reset` needs to randomly choose the agent's and target's positions (we repeat this if they have the same position). The return type of :meth:`reset` is a tuple of the initial observation and any auxiliary information. Therefore, we can use the methods ``_get_obs`` and ``_get_info`` that we implemented earlier for that:
+```
+
+```python
+    def reset(self, seed: Optional[int] = None, options: Optional[dict] = None):
+        # We need the following line to seed self.np_random
+        super().reset(seed=seed)
+
+        # Choose the agent's location uniformly at random
+        self._agent_location = self.np_random.integers(0, self.size, size=2, dtype=int)
+
+        # We will sample the target's location randomly until it does not coincide with the agent's location
+        self._target_location = self._agent_location
+        while np.array_equal(self._target_location, self._agent_location):
+            self._target_location = self.np_random.integers(
+                0, self.size, size=2, dtype=int
+            )
+
+        observation = self._get_obs()
+        info = self._get_info()
+
+        return observation, info
+```
+
+## Step function
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium.Env
+
+The :meth:`step` method usually contains most of the logic for your environment. It accepts an ``action`` and computes the state of the environment after applying the action, returning a tuple of the next observation, the resulting reward, whether the environment has terminated, whether the environment has truncated, and auxiliary information.
+```
+```{eval-rst}
+.. py:currentmodule:: gymnasium
+
+For our environment, several things need to happen during the step function:
+
+ - We use ``self._action_to_direction`` to convert the discrete action (e.g., 2) to a grid direction for our agent's location. To prevent the agent from going out of bounds of the grid, we clip the agent's location to stay within bounds.
+ - We compute the agent's reward by checking if the agent's current position is equal to the target's location.
+ - Since the environment doesn't truncate internally (we can apply a time limit wrapper to the environment during :meth:`make`), we permanently set truncated to False.
+ - We once again use ``_get_obs`` and ``_get_info`` to obtain the agent's observation and auxiliary information.
+```
+
+```python
+    def step(self, action):
+        # Map the action (element of {0,1,2,3}) to the direction we walk in
+        direction = self._action_to_direction[action]
+        # We use `np.clip` to make sure we don't leave the grid bounds
+        self._agent_location = np.clip(
+            self._agent_location + direction, 0, self.size - 1
+        )
+
+        # An environment is completed if and only if the agent has reached the target
+        terminated = np.array_equal(self._agent_location, self._target_location)
+        truncated = False
+        reward = 1 if terminated else 0  # the agent is only rewarded when it reaches the target
+        observation = self._get_obs()
+        info = self._get_info()
+
+        return observation, reward, terminated, truncated, info
+```
+
+## Registering and making the environment
+
+```{eval-rst}
+While it is possible to use your new custom environment immediately, it is more common for environments to be initialized using :meth:`gymnasium.make`. In this section, we explain how to register a custom environment and then initialize it.
+
+The environment ID consists of three components, two of which are optional: an optional namespace (here: ``gymnasium_env``), a mandatory name (here: ``GridWorld``) and an optional but recommended version (here: v0). It could also have been registered as ``GridWorld-v0`` (the recommended approach), ``GridWorld`` or ``gymnasium_env/GridWorld``, and the appropriate ID should then be used during environment creation.
+
+The entry point can be a string or a function; as this tutorial isn't part of a python project, we cannot use a string here, but for most environments, this is the normal way of specifying the entry point.
+
+Register also has additional parameters that can be used to specify keyword arguments to the environment, e.g., whether to apply a time limit wrapper, etc. See :meth:`gymnasium.register` for more information.
+```
+
+```python
+gym.register(
+    id="gymnasium_env/GridWorld-v0",
+    entry_point=GridWorldEnv,
+)
+```
+
+For a more complete guide on registering a custom environment (including with a string entry point), please read the full [create environment](../tutorials/gymnasium_basics/environment_creation) tutorial.
+
+```{eval-rst}
+Once the environment is registered, you can check all registered environments via :meth:`gymnasium.pprint_registry`, and the environment can then be initialized using :meth:`gymnasium.make`. A vectorized version of the environment, with multiple instances of the same environment running in parallel, can be instantiated with :meth:`gymnasium.make_vec`.
+```
+
+```python
+>>> import gymnasium as gym
+>>> gym.make("gymnasium_env/GridWorld-v0")
+<OrderEnforcing<PassiveEnvChecker<GridWorld<gymnasium_env/GridWorld-v0>>>>
+>>> gym.make("gymnasium_env/GridWorld-v0", max_episode_steps=100)
+<TimeLimit<OrderEnforcing<PassiveEnvChecker<GridWorld<gymnasium_env/GridWorld-v0>>>>>
+>>> env = gym.make("gymnasium_env/GridWorld-v0", size=10)
+>>> env.unwrapped.size
+10
+>>> gym.make_vec("gymnasium_env/GridWorld-v0", num_envs=3)
+SyncVectorEnv(gymnasium_env/GridWorld-v0, num_envs=3)
+```
+
+## Using Wrappers
+
+Oftentimes, we want to use different variants of a custom environment, or we want to modify the behavior of an environment that is provided by Gymnasium or some other party. Wrappers allow us to do this without changing the environment implementation or adding any boilerplate code. Check out the [wrapper documentation](../api/wrappers) for details on how to use wrappers and instructions for implementing your own. In our example, observations cannot be used directly in learning code because they are dictionaries. However, we don't actually need to touch our environment implementation to fix this! We can simply add a wrapper on top of environment instances to flatten observations into a single array:
+
+```python
+>>> from gymnasium.wrappers import FlattenObservation
+
+>>> env = gym.make('gymnasium_env/GridWorld-v0')
+>>> env.observation_space
+Dict('agent': Box(0, 4, (2,), int64), 'target': Box(0, 4, (2,), int64))
+>>> env.reset()
+({'agent': array([4, 1]), 'target': array([2, 4])}, {'distance': 5.0})
+>>> wrapped_env = FlattenObservation(env)
+>>> wrapped_env.observation_space
+Box(0, 4, (4,), int64)
+>>> wrapped_env.reset()
+(array([3, 0, 2, 1]), {'distance': 2.0})
+```
diff --git a/docs/introduction/migration-guide.md b/docs/introduction/migration_guide.md
similarity index 97%
rename from docs/introduction/migration-guide.md
rename to docs/introduction/migration_guide.md
index 2484c5ec1..895104bc5 100644
--- a/docs/introduction/migration-guide.md
+++ b/docs/introduction/migration_guide.md
@@ -3,13 +3,12 @@ layout: "contents"
 title: Migration Guide
 ---

-# v0.21 to v0.26 Migration Guide
+# Migration Guide - v0.21 to v1.0.0

 ```{eval-rst}
 .. py:currentmodule:: gymnasium.wrappers

-Gymnasium is a fork of `OpenAI Gym v0.26 <https://github.com/openai/gym/releases/tag/0.26.2>`_, which introduced a large breaking change from `Gym v0.21 <https://github.com/openai/gym/releases/tag/v0.21.0>`_. In this guide, we briefly outline the API changes from Gym v0.21 - which a number of tutorials have been written for - to Gym v0.26. For environments still stuck in the v0.21 API, users can use the :class:`EnvCompatibility` wrapper to convert them to v0.26 compliant.
-For more information, see the `guide </content/gym_compatibility>`_
+Gymnasium is a fork of `OpenAI Gym v0.26 <https://github.com/openai/gym/releases/tag/0.26.2>`_, which introduced a large breaking change from `Gym v0.21 <https://github.com/openai/gym/releases/tag/v0.21.0>`_. In this guide, we briefly outline the API changes from Gym v0.21 - which a number of tutorials have been written for - to Gym v0.26. For environments still stuck in the v0.21 API, see the `guide </content/gym_compatibility>`_.
 ```

 ## Example code for v0.21
diff --git a/docs/introduction/record_agent.md b/docs/introduction/record_agent.md
new file mode 100644
index 000000000..42ba4b607
--- /dev/null
+++ b/docs/introduction/record_agent.md
@@ -0,0 +1,96 @@
+---
+layout: "contents"
+title: Recording Agents
+---
+
+# Recording Agents
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium.wrappers
+
+During training or when evaluating an agent, it may be interesting to record agent behaviour over an episode and log the total reward accumulated. This can be achieved through two wrappers: :class:`RecordEpisodeStatistics` and :class:`RecordVideo`; the first tracks episode data such as the total rewards, episode length and time taken, and the second generates mp4 videos of the agent using the environment renderings.
+
+We show how to apply these wrappers for two types of problems; the first for recording data for every episode (normally evaluation) and the second for recording data periodically (for normal training).
+```
+
+## Recording Every Episode
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium.wrappers
+
+Given a trained agent, you may wish to record several episodes during evaluation to see how the agent acts. Below we provide an example script to do this with the :class:`RecordEpisodeStatistics` and :class:`RecordVideo` wrappers.
+```
+
+```python
+import gymnasium as gym
+from gymnasium.wrappers import RecordEpisodeStatistics, RecordVideo
+
+num_eval_episodes = 4
+
+env = gym.make("CartPole-v1", render_mode="rgb_array")  # replace with your environment
+env = RecordVideo(env, video_folder="cartpole-agent", name_prefix="eval",
+                  episode_trigger=lambda x: True)
+env = RecordEpisodeStatistics(env, buffer_length=num_eval_episodes)
+
+for episode_num in range(num_eval_episodes):
+    obs, info = env.reset()
+
+    episode_over = False
+    while not episode_over:
+        action = env.action_space.sample()  # replace with actual agent
+        obs, reward, terminated, truncated, info = env.step(action)
+
+        episode_over = terminated or truncated
+env.close()
+
+print(f'Episode time taken: {env.time_queue}')
+print(f'Episode total rewards: {env.return_queue}')
+print(f'Episode lengths: {env.length_queue}')
+```
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium.wrappers
+
+In the script above, for the :class:`RecordVideo` wrapper, we specify three different variables: ``video_folder`` to specify the folder that the videos should be saved to (change for your problem), ``name_prefix`` for the prefix of the videos themselves, and finally an ``episode_trigger`` such that every episode is recorded. This means that for every episode of the environment, a video will be recorded and saved in the style "cartpole-agent/eval-episode-x.mp4".
+
+For :class:`RecordEpisodeStatistics`, we only need to specify the buffer length, which is the max length of the internal ``time_queue``, ``return_queue`` and ``length_queue``. Rather than collecting the data for each episode individually, we can use the data queues to print the information at the end of the evaluation.
+
+To speed up evaluating environments, it is possible to implement this with vector environments in order to evaluate ``N`` episodes at the same time, in parallel rather than in series.
+```
+
+## Recording the Agent during Training
+
+During training, an agent will act in hundreds or thousands of episodes, so you can't record a video for each one; still, developers might want to know how the agent acts at different points in training, recording episodes periodically. For the episode statistics, on the other hand, it is more helpful to know the data for every episode. The following script provides an example of how to periodically record episodes of an agent while recording every episode's statistics (we use Python's logger, but [tensorboard](https://www.tensorflow.org/tensorboard), [wandb](https://docs.wandb.ai/guides/track) and other modules are available).
+
+```python
+import logging
+
+import gymnasium as gym
+from gymnasium.wrappers import RecordEpisodeStatistics, RecordVideo
+
+training_period = 250  # record the agent's episode every 250 episodes
+num_training_episodes = 10_000  # total number of training episodes
+
+env = gym.make("CartPole-v1", render_mode="rgb_array")  # replace with your environment
+env = RecordVideo(env, video_folder="cartpole-agent", name_prefix="training",
+                  episode_trigger=lambda x: x % training_period == 0)
+env = RecordEpisodeStatistics(env)
+
+for episode_num in range(num_training_episodes):
+    obs, info = env.reset()
+
+    episode_over = False
+    while not episode_over:
+        action = env.action_space.sample()  # replace with actual agent
+        obs, reward, terminated, truncated, info = env.step(action)

+        episode_over = terminated or truncated
+
+    logging.info(f"episode-{episode_num}: {info['episode']}")
+env.close()
+```
+
+## More information
+
+* [Training an agent](train_agent.md)
+* [More training tutorials](../tutorials/training_agents)
diff --git a/docs/introduction/speed_up_env.md b/docs/introduction/speed_up_env.md
new file mode 100644
index 000000000..567139707
--- /dev/null
+++ b/docs/introduction/speed_up_env.md
@@ -0,0 +1,32 @@
+---
+layout: "contents"
+title: Speeding Up Training
+firstpage:
+---
+
+# Speeding Up Training
+
+Reinforcement Learning can be a computationally difficult problem that is both sample-inefficient and difficult to scale to more complex environments. In this page, we talk about general strategies for speeding up training: vectorizing environments, optimizing training, and algorithmic heuristics.
+
+## Vectorized environments
+
+```{eval-rst}
+.. py:currentmodule:: gymnasium
+
+Normally in training, agents will sample from a single environment, limiting the number of steps (samples) per second to the speed of the environment. Training throughput can be substantially increased by acting in multiple environments at the same time, referred to as vectorized environments, where multiple instances of the same environment run in parallel (on multiple CPUs). Gymnasium provides two built-in classes to vectorize most generic environments: :class:`gymnasium.vector.SyncVectorEnv` and :class:`gymnasium.vector.AsyncVectorEnv`, which can be easily created with :meth:`gymnasium.make_vec`.
+
+It should be noted that vectorizing environments might require changes to your training algorithm and can cause instability in training for very large numbers of sub-environments.
+```
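+As a minimal sketch of the vectorized API (the ``vectorization_mode`` keyword is assumed to be available in your Gymnasium version; ``"sync"`` steps the sub-environments serially, while ``"async"`` runs them in subprocesses):
+
+```python
+import gymnasium as gym
+
+# Run 8 CartPole instances in parallel
+envs = gym.make_vec("CartPole-v1", num_envs=8, vectorization_mode="async")
+
+observations, infos = envs.reset(seed=42)
+# One action per sub-environment, sampled as a batch
+observations, rewards, terminations, truncations, infos = envs.step(envs.action_space.sample())
+envs.close()
+```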
+
+## Optimizing training
+
+Speeding up training can generally be achieved by optimizing your code; in particular, deep reinforcement learning algorithms that use GPUs in training need to transfer data to and from RAM and GPU memory, which can be a bottleneck.
+
+Both PyTorch and Jax provide the ability to `jit` (just-in-time compile) code for CPU, GPU and TPU (for Jax) in order to decrease training time.
+
+## Algorithmic heuristics
+
+Academic researchers are consistently exploring new optimizations to improve agent performance and reduce the number of samples required to train an agent. In particular, sample-efficient reinforcement learning is a specialist sub-field of reinforcement learning that explores optimizations for training algorithms and environment heuristics that reduce the number of agent observations needed for an agent to maximise performance. As the field is consistently improving, we refer readers to survey papers and the latest research for the most efficient algorithmic improvements currently available.
diff --git a/docs/introduction/train_agent.md b/docs/introduction/train_agent.md
new file mode 100644
index 000000000..4cbc9173e
--- /dev/null
+++ b/docs/introduction/train_agent.md
@@ -0,0 +1,165 @@
+---
+layout: "contents"
+title: Train an Agent
+---
+
+# Training an Agent
+
+This page provides a short outline of how to train an agent for a Gymnasium environment; in particular, we will use tabular Q-learning to solve the Blackjack v1 environment. For the complete version of this tutorial and more training tutorials for other environments and algorithms, see [this](../tutorials/training_agents). Please read [basic usage](basic_usage) before reading this page. Before we implement any code, here is an overview of Blackjack and Q-learning.
+
+Blackjack is one of the most popular casino card games and is also infamous for being beatable under certain conditions. This version of the game uses an infinite deck (we draw the cards with replacement), so counting cards won't be a viable strategy in our simulated game. The observation is a tuple of the player's current sum, the value of the dealer's face-up card and a boolean value on whether the player holds a usable ace. The agent can pick between two actions: stand (0), such that the player takes no more cards, and hit (1), such that the player will take another card. To win, your card sum should be greater than the dealer's without exceeding 21. The game ends if the player selects stand or if the card sum is greater than 21. Full documentation can be found at [https://gymnasium.farama.org/environments/toy_text/blackjack](https://gymnasium.farama.org/environments/toy_text/blackjack).
+
+Q-learning is a model-free off-policy learning algorithm by Watkins, 1989, for environments with discrete action spaces, and was famous for being the first reinforcement learning algorithm to prove convergence to an optimal policy under certain conditions.
+
+## Executing an action
+
+After receiving our first observation, we are only going to use the ``env.step(action)`` function to interact with the environment. This function takes an action as input and executes it in the environment. Because that action changes the state of the environment, it returns five useful variables to us. These are:
+
+ - ``next observation``: This is the observation that the agent will receive after taking the action.
+ - ``reward``: This is the reward that the agent will receive after taking the action.
+ - ``terminated``: This is a boolean variable that indicates whether or not the environment has terminated, i.e., ended due to an internal condition.
+ - ``truncated``: This is a boolean variable that also indicates whether the episode ended by early truncation, i.e., a time limit was reached.
+ - ``info``: This is a dictionary that might contain additional information about the environment.
+
+The ``next observation``, ``reward``, ``terminated`` and ``truncated`` variables are self-explanatory, but the ``info`` variable requires some additional explanation. This variable contains a dictionary that might have some extra information about the environment, but in the Blackjack-v1 environment you can ignore it. For example, in Atari environments the info dictionary has an ``ale.lives`` key that tells us how many lives the agent has left. If the agent has 0 lives, then the episode is over.
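+As a small sketch of a single interaction and the five returned variables (``sab=False`` selects the standard rules rather than the Sutton & Barto variant; see the environment documentation):
+
+```python
+import gymnasium as gym
+
+env = gym.make("Blackjack-v1", sab=False)
+observation, info = env.reset(seed=0)
+
+action = env.action_space.sample()  # 0 = stand, 1 = hit
+next_observation, reward, terminated, truncated, info = env.step(action)
+episode_over = terminated or truncated
+```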
+
+Note that it is not a good idea to call ``env.render()`` in your training loop because rendering slows down training by a lot. Rather, try to build an extra loop to evaluate and showcase the agent after training.
+
+## Building an agent
+
+Let's build a Q-learning agent to solve Blackjack! We'll need some functions for picking an action and updating the agent's action values. To ensure that the agent explores the environment, one possible solution is the epsilon-greedy strategy, where we pick a random action with probability ``epsilon`` and the greedy action (currently valued as the best) with probability ``1 - epsilon``.
+
+```python
+from collections import defaultdict
+import gymnasium as gym
+import numpy as np
+
+
+class BlackjackAgent:
+    def __init__(
+        self,
+        env: gym.Env,
+        learning_rate: float,
+        initial_epsilon: float,
+        epsilon_decay: float,
+        final_epsilon: float,
+        discount_factor: float = 0.95,
+    ):
+        """Initialize a Reinforcement Learning agent with an empty dictionary
+        of state-action values (q_values), a learning rate and an epsilon.
+
+        Args:
+            env: The training environment
+            learning_rate: The learning rate
+            initial_epsilon: The initial epsilon value
+            epsilon_decay: The decay for epsilon
+            final_epsilon: The final epsilon value
+            discount_factor: The discount factor for computing the Q-value
+        """
+        self.env = env
+        self.q_values = defaultdict(lambda: np.zeros(env.action_space.n))
+
+        self.lr = learning_rate
+        self.discount_factor = discount_factor
+
+        self.epsilon = initial_epsilon
+        self.epsilon_decay = epsilon_decay
+        self.final_epsilon = final_epsilon
+
+        self.training_error = []
+
+    def get_action(self, obs: tuple[int, int, bool]) -> int:
+        """
+        Returns the best action with probability (1 - epsilon)
+        otherwise a random action with probability epsilon to ensure exploration.
+        """
+        # with probability epsilon return a random action to explore the environment
+        if np.random.random() < self.epsilon:
+            return self.env.action_space.sample()
+        # with probability (1 - epsilon) act greedily (exploit)
+        else:
+            return int(np.argmax(self.q_values[obs]))
+
+    def update(
+        self,
+        obs: tuple[int, int, bool],
+        action: int,
+        reward: float,
+        terminated: bool,
+        next_obs: tuple[int, int, bool],
+    ):
+        """Updates the Q-value of an action."""
+        future_q_value = (not terminated) * np.max(self.q_values[next_obs])
+        temporal_difference = (
+            reward + self.discount_factor * future_q_value - self.q_values[obs][action]
+        )
+
+        self.q_values[obs][action] = (
+            self.q_values[obs][action] + self.lr * temporal_difference
+        )
+        self.training_error.append(temporal_difference)
+
+    def decay_epsilon(self):
+        self.epsilon = max(self.final_epsilon, self.epsilon - self.epsilon_decay)
+```
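+The ``update`` method above implements the standard Q-learning temporal-difference update, with the future value zeroed out on termination. Written out (assuming MyST ``{math}`` blocks are enabled for these docs):
+
+```{math}
+Q(s_t, a_t) \leftarrow Q(s_t, a_t) + \alpha \left( r_{t+1} + \gamma \max_{a'} Q(s_{t+1}, a') - Q(s_t, a_t) \right)
+```
+
+where ``learning_rate`` is the step size (alpha) and ``discount_factor`` is gamma.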
+Info: The current hyperparameters are set to quickly train a decent agent. If you want to converge to the optimal policy, try increasing ``n_episodes`` by 10x and lowering the ``learning_rate`` (e.g., to 0.001).
+
+```python
+from tqdm import tqdm
+
+# wrap the environment created above to record episode statistics over training
+env = gym.wrappers.RecordEpisodeStatistics(env, deque_size=n_episodes)
+
+for episode in tqdm(range(n_episodes)):
+    obs, info = env.reset()
+    done = False
+
+    # play one episode
+    while not done:
+        action = agent.get_action(obs)
+        next_obs, reward, terminated, truncated, info = env.step(action)
+
+        # update the agent
+        agent.update(obs, action, reward, terminated, next_obs)
+
+        # update whether the environment is done and the current obs
+        done = terminated or truncated
+        obs = next_obs
+
+    agent.decay_epsilon()
+```
+
+## Visualising the policy
+
+See the [full tutorial](../tutorials/training_agents) for code that visualises the learned policy and the training statistics recorded by ``RecordEpisodeStatistics``.
+
+Hopefully this tutorial helped you grasp how to interact with Gymnasium environments and has set you on a journey to solve many more RL challenges.
+
+It is recommended that you solve this environment by yourself (project-based learning is really effective!). You can apply your favorite discrete RL algorithm or give Monte Carlo ES a try (covered in [Sutton & Barto](http://incompleteideas.net/book/the-book-2nd.html), section 5.3) - this way you can compare your results directly to the book.
+
+Best of luck!
diff --git a/docs/tutorials/gymnasium_basics/environment_creation.py b/docs/tutorials/gymnasium_basics/environment_creation.py
index 079e86492..74f2392a6 100644
--- a/docs/tutorials/gymnasium_basics/environment_creation.py
+++ b/docs/tutorials/gymnasium_basics/environment_creation.py
@@ -22,7 +22,7 @@
     pipx install copier
 
 Alternative solutions
-~~~~~~~~~~~~~~~~~~~~
+~~~~~~~~~~~~~~~~~~~~~
 
 Install Copier with Pip or Conda:
 
@@ -98,6 +98,10 @@
 
 An episode in this environment (with ``size=5``) might look like this:
 
+.. image:: /_static/videos/tutorials/environment-creation-example-episode.gif
+   :width: 400
+   :alt: Example episode of the custom environment
+
 where the blue dot is the agent and the red square represents the target.
 
@@ -111,7 +115,7 @@
 # Our custom environment will inherit from the abstract class
 # ``gymnasium.Env``. You shouldn’t forget to add the ``metadata``
 # attribute to your class. There, you should specify the render-modes that
-# are supported by your environment (e.g. ``"human"``, ``"rgb_array"``,
+# are supported by your environment (e.g., ``"human"``, ``"rgb_array"``,
 # ``"ansi"``) and the framerate at which your environment should be
 # rendered. Every environment should support ``None`` as render-mode; you
 # don’t need to add it in the metadata. In ``GridWorldEnv``, we will
@@ -141,10 +145,10 @@
 
 
 class Actions(Enum):
-    right = 0
-    up = 1
-    left = 2
-    down = 3
+    RIGHT = 0
+    UP = 1
+    LEFT = 2
+    DOWN = 3
 
 
 class GridWorldEnv(gym.Env):
@@ -162,6 +166,8 @@ def __init__(self, render_mode=None, size=5):
                 "target": spaces.Box(0, size - 1, shape=(2,), dtype=int),
             }
         )
+        self._agent_location = np.array([-1, -1], dtype=int)
+        self._target_location = np.array([-1, -1], dtype=int)
 
         # We have 4 actions, corresponding to "right", "up", "left", "down"
         self.action_space = spaces.Discrete(4)
@@ -172,10 +178,10 @@ def __init__(self, render_mode=None, size=5):
         i.e. 0 corresponds to "right", 1 to "up" etc.
""" self._action_to_direction = { - Actions.right: np.array([1, 0]), - Actions.up: np.array([0, 1]), - Actions.left: np.array([-1, 0]), - Actions.down: np.array([0, -1]), + Actions.RIGHT.value: np.array([1, 0]), + Actions.UP.value: np.array([0, 1]), + Actions.LEFT.value: np.array([-1, 0]), + Actions.DOWN.value: np.array([0, -1]), } assert render_mode is None or render_mode in self.metadata["render_modes"] @@ -218,7 +224,7 @@ def _get_info(self): # %% # Oftentimes, info will also contain some data that is only available -# inside the ``step`` method (e.g. individual reward terms). In that case, +# inside the ``step`` method (e.g., individual reward terms). In that case, # we would have to update the dictionary that is returned by ``_get_info`` # in ``step``. @@ -443,8 +449,6 @@ def close(self): # +----------------------+-----------+-----------+---------------------------------------------------------------------------------------------------------------+ # | ``order_enforce`` | ``bool`` | ``True`` | Whether to wrap the environment in an ``OrderEnforcing`` wrapper | # +----------------------+-----------+-----------+---------------------------------------------------------------------------------------------------------------+ -# | ``autoreset`` | ``bool`` | ``False`` | Whether to wrap the environment in an ``AutoResetWrapper`` | -# +----------------------+-----------+-----------+---------------------------------------------------------------------------------------------------------------+ # | ``kwargs`` | ``dict`` | ``{}`` | The default kwargs to pass to the environment class | # +----------------------+-----------+-----------+---------------------------------------------------------------------------------------------------------------+ # diff --git a/docs/tutorials/gymnasium_basics/implementing_custom_wrappers.py b/docs/tutorials/gymnasium_basics/implementing_custom_wrappers.py index f0eb9a344..4ba67720a 100644 --- a/docs/tutorials/gymnasium_basics/implementing_custom_wrappers.py +++ b/docs/tutorials/gymnasium_basics/implementing_custom_wrappers.py @@ -112,6 +112,7 @@ def reward(self, r: SupportsFloat) -> SupportsFloat: # - You can set a new action or observation space by defining ``self.action_space`` or ``self.observation_space`` in ``__init__``, respectively # - You can set new metadata and reward range by defining ``self.metadata`` and ``self.reward_range`` in ``__init__``, respectively # - You can override :meth:`gymnasium.Wrapper.step`, :meth:`gymnasium.Wrapper.render`, :meth:`gymnasium.Wrapper.close` etc. +# # If you do this, you can access the environment that was passed # to your wrapper (which *still* might be wrapped in some other wrapper) by accessing the attribute :attr:`env`. # diff --git a/gymnasium/wrappers/stateful_observation.py b/gymnasium/wrappers/stateful_observation.py index 6f211d72f..a77f7c230 100644 --- a/gymnasium/wrappers/stateful_observation.py +++ b/gymnasium/wrappers/stateful_observation.py @@ -412,7 +412,7 @@ class NormalizeObservation( ): """Normalizes observations to be centered at the mean with unit variance. - The property :prop:`_update_running_mean` allows to freeze/continue the running mean calculation of the observation + The property :attr:`update_running_mean` allows to freeze/continue the running mean calculation of the observation statistics. If ``True`` (default), the ``RunningMeanStd`` will get updated every time ``step`` or ``reset`` is called. 
     If ``False``, the calculated statistics are used but not updated anymore; this may be used during evaluation.
diff --git a/gymnasium/wrappers/vector/common.py b/gymnasium/wrappers/vector/common.py
index 2e89fd5e1..d019b4a2d 100644
--- a/gymnasium/wrappers/vector/common.py
+++ b/gymnasium/wrappers/vector/common.py
@@ -65,14 +65,14 @@ class RecordEpisodeStatistics(VectorWrapper):
     def __init__(
         self,
         env: VectorEnv,
-        deque_size: int = 100,
+        buffer_length: int = 100,
         stats_key: str = "episode",
     ):
         """This wrapper will keep track of cumulative rewards and episode lengths.
 
         Args:
             env (Env): The environment to apply the wrapper
-            deque_size: The size of the buffers :attr:`return_queue` and :attr:`length_queue`
+            buffer_length: The size of the buffers :attr:`return_queue`, :attr:`length_queue` and :attr:`time_queue`
             stats_key: The info key to save the data
         """
         super().__init__(env)
@@ -84,9 +84,9 @@ def __init__(
         self.episode_returns: np.ndarray = np.zeros(())
         self.episode_lengths: np.ndarray = np.zeros(())
 
-        self.time_queue = deque(maxlen=deque_size)
-        self.return_queue = deque(maxlen=deque_size)
-        self.length_queue = deque(maxlen=deque_size)
+        self.time_queue = deque(maxlen=buffer_length)
+        self.return_queue = deque(maxlen=buffer_length)
+        self.length_queue = deque(maxlen=buffer_length)
 
     def reset(
         self,

From ede2ed1af2146bd9d3a3129418eb929636c78d96 Mon Sep 17 00:00:00 2001
From: Kallinteris Andreas <30759571+Kallinteris-Andreas@users.noreply.github.com>
Date: Fri, 8 Dec 2023 19:48:21 +0200
Subject: [PATCH 4/4] Stop exporting `MuJoCo-v2` environment base classes and export `MujocoRenderer` (#827)

---
 gymnasium/envs/__init__.py        | 22 +++++++++++-----------
 gymnasium/envs/mujoco/__init__.py | 17 ++---------------
 2 files changed, 13 insertions(+), 26 deletions(-)

diff --git a/gymnasium/envs/__init__.py b/gymnasium/envs/__init__.py
index 507d075b4..b997f2784 100644
--- a/gymnasium/envs/__init__.py
+++ b/gymnasium/envs/__init__.py
@@ -177,7 +177,7 @@
 
 register(
     id="Reacher-v2",
-    entry_point="gymnasium.envs.mujoco:ReacherEnv",
+    entry_point="gymnasium.envs.mujoco.reacher:ReacherEnv",
     max_episode_steps=50,
     reward_threshold=-3.75,
 )
@@ -198,7 +198,7 @@
 
 register(
     id="Pusher-v2",
-    entry_point="gymnasium.envs.mujoco:PusherEnv",
+    entry_point="gymnasium.envs.mujoco.pusher:PusherEnv",
     max_episode_steps=100,
     reward_threshold=0.0,
 )
@@ -221,7 +221,7 @@
 
 register(
     id="InvertedPendulum-v2",
-    entry_point="gymnasium.envs.mujoco:InvertedPendulumEnv",
+    entry_point="gymnasium.envs.mujoco.inverted_pendulum:InvertedPendulumEnv",
     max_episode_steps=1000,
     reward_threshold=950.0,
 )
@@ -242,7 +242,7 @@
 
 register(
     id="InvertedDoublePendulum-v2",
-    entry_point="gymnasium.envs.mujoco:InvertedDoublePendulumEnv",
+    entry_point="gymnasium.envs.mujoco.inverted_double_pendulum:InvertedDoublePendulumEnv",
     max_episode_steps=1000,
     reward_threshold=9100.0,
 )
@@ -265,7 +265,7 @@
 
 register(
     id="HalfCheetah-v2",
-    entry_point="gymnasium.envs.mujoco:HalfCheetahEnv",
+    entry_point="gymnasium.envs.mujoco.half_cheetah:HalfCheetahEnv",
     max_episode_steps=1000,
     reward_threshold=4800.0,
 )
@@ -293,7 +293,7 @@
 
 register(
     id="Hopper-v2",
-    entry_point="gymnasium.envs.mujoco:HopperEnv",
+    entry_point="gymnasium.envs.mujoco.hopper:HopperEnv",
     max_episode_steps=1000,
     reward_threshold=3800.0,
 )
@@ -321,7 +321,7 @@
 
 register(
     id="Swimmer-v2",
-    entry_point="gymnasium.envs.mujoco:SwimmerEnv",
+    entry_point="gymnasium.envs.mujoco.swimmer:SwimmerEnv",
     max_episode_steps=1000,
     reward_threshold=360.0,
 )
@@ -350,7 +350,7 @@
 
 register(
     id="Walker2d-v2",
     max_episode_steps=1000,
-    entry_point="gymnasium.envs.mujoco:Walker2dEnv",
+    entry_point="gymnasium.envs.mujoco.walker2d:Walker2dEnv",
 )
 
 register(
@@ -373,7 +373,7 @@
 
 register(
     id="Ant-v2",
-    entry_point="gymnasium.envs.mujoco:AntEnv",
+    entry_point="gymnasium.envs.mujoco.ant:AntEnv",
     max_episode_steps=1000,
     reward_threshold=6000.0,
 )
@@ -401,7 +401,7 @@
 
 register(
     id="Humanoid-v2",
-    entry_point="gymnasium.envs.mujoco:HumanoidEnv",
+    entry_point="gymnasium.envs.mujoco.humanoid:HumanoidEnv",
     max_episode_steps=1000,
 )
 
@@ -425,7 +425,7 @@
 
 register(
     id="HumanoidStandup-v2",
-    entry_point="gymnasium.envs.mujoco:HumanoidStandupEnv",
+    entry_point="gymnasium.envs.mujoco.humanoidstandup:HumanoidStandupEnv",
     max_episode_steps=1000,
 )
diff --git a/gymnasium/envs/mujoco/__init__.py b/gymnasium/envs/mujoco/__init__.py
index a5ed9d2a8..a8d029e60 100644
--- a/gymnasium/envs/mujoco/__init__.py
+++ b/gymnasium/envs/mujoco/__init__.py
@@ -1,15 +1,2 @@
-from gymnasium.envs.mujoco.mujoco_env import MujocoEnv, MuJocoPyEnv  # isort:skip
-
-# ^^^^^ so that user gets the correct error
-# message if mujoco is not installed correctly
-from gymnasium.envs.mujoco.ant import AntEnv
-from gymnasium.envs.mujoco.half_cheetah import HalfCheetahEnv
-from gymnasium.envs.mujoco.hopper import HopperEnv
-from gymnasium.envs.mujoco.humanoid import HumanoidEnv
-from gymnasium.envs.mujoco.humanoidstandup import HumanoidStandupEnv
-from gymnasium.envs.mujoco.inverted_double_pendulum import InvertedDoublePendulumEnv
-from gymnasium.envs.mujoco.inverted_pendulum import InvertedPendulumEnv
-from gymnasium.envs.mujoco.pusher import PusherEnv
-from gymnasium.envs.mujoco.reacher import ReacherEnv
-from gymnasium.envs.mujoco.swimmer import SwimmerEnv
-from gymnasium.envs.mujoco.walker2d import Walker2dEnv
+from gymnasium.envs.mujoco.mujoco_env import MujocoEnv, MuJocoPyEnv
+from gymnasium.envs.mujoco.mujoco_rendering import MujocoRenderer
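
A minimal sketch of the import surface this patch leaves behind (module paths taken from the new entry points above; this is an illustration, not part of the patch):

```python
# still exported at the package level after the patch
from gymnasium.envs.mujoco import MujocoEnv, MuJocoPyEnv, MujocoRenderer

# env base classes now have to be imported from their own modules,
# matching the updated entry points such as "gymnasium.envs.mujoco.ant:AntEnv"
from gymnasium.envs.mujoco.ant import AntEnv
```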