diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 87496fce6..8c0ac19e9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -48,6 +48,13 @@ repos:
rev: 23.3.0
hooks:
- id: black
+ - repo: https://github.com/pre-commit/mirrors-mypy
+ rev: "v1.6.1"
+ hooks:
+ - id: mypy
+ exclude: docs/
+ args: [--ignore-missing-imports]
+ additional_dependencies: [numpy==1.26.1]
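+        # Run this hook on demand with: pre-commit run mypy --all-files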
# - repo: https://github.com/pycqa/pydocstyle
# rev: 6.3.0
# hooks:
diff --git a/docs/CNAME b/docs/CNAME
deleted file mode 100644
index 41de4016c..000000000
--- a/docs/CNAME
+++ /dev/null
@@ -1 +0,0 @@
-metaworld.farama.org
\ No newline at end of file
diff --git a/docs/_static/img/favicon.svg b/docs/_static/img/favicon.svg
index 48f928193..743f52246 100644
--- a/docs/_static/img/favicon.svg
+++ b/docs/_static/img/favicon.svg
@@ -1,115 +1,161 @@
[SVG markup diff omitted]
diff --git a/docs/_static/img/metaworld_black.svg b/docs/_static/img/metaworld_black.svg
index c0bb7eb46..473a6ba01 100644
--- a/docs/_static/img/metaworld_black.svg
+++ b/docs/_static/img/metaworld_black.svg
@@ -1,111 +1,161 @@
[SVG markup diff omitted]
diff --git a/docs/_static/img/metaworld_white.svg b/docs/_static/img/metaworld_white.svg
index bd41903e4..8c6a92a31 100644
--- a/docs/_static/img/metaworld_white.svg
+++ b/docs/_static/img/metaworld_white.svg
@@ -1,115 +1,162 @@
[SVG markup diff omitted]
diff --git a/docs/_static/metaworld-text.svg b/docs/_static/metaworld-text.svg
new file mode 100644
index 000000000..a9a6497d1
--- /dev/null
+++ b/docs/_static/metaworld-text.svg
@@ -0,0 +1,202 @@
[new SVG markup omitted]
diff --git a/docs/_static/mt10.gif b/docs/_static/mt10.gif
new file mode 100644
index 000000000..bea6ce710
Binary files /dev/null and b/docs/_static/mt10.gif differ
diff --git a/docs/index.md b/docs/index.md
index d6c57e091..330d76293 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -4,7 +4,7 @@ firstpage:
lastpage:
---
-```{project-logo} _static/metaworld-text.png
+```{project-logo} _static/metaworld-text.svg
:alt: Metaworld Logo
```
@@ -12,7 +12,7 @@ lastpage:
Meta-World is an open-source simulated benchmark for meta-reinforcement learning and multi-task learning consisting of 50 distinct robotic manipulation tasks.
```
-```{figure} _static/REPLACE_ME.gif
+```{figure} _static/mt10.gif
-:alt: REPLACE ME
+:alt: The MT10 benchmark tasks
:width: 500
```
@@ -33,15 +33,17 @@ env.set_task(task) # Set task
-obs = env.reset()  # Reset environment
+obs, info = env.reset()  # Reset environment
a = env.action_space.sample() # Sample an action
-obs, reward, done, info = env.step(a)
+obs, reward, terminate, truncate, info = env.step(a)
```
```{toctree}
:hidden:
:caption: Introduction
-introduction/installation
introduction/basic_usage
+installation/installation
+rendering/rendering
+usage/basic_usage
```
diff --git a/docs/introduction/installation.md b/docs/installation/installation.md
similarity index 77%
rename from docs/introduction/installation.md
rename to docs/installation/installation.md
index 8eb172a43..ec1785c4c 100644
--- a/docs/introduction/installation.md
+++ b/docs/installation/installation.md
@@ -15,7 +15,7 @@ cd Metaworld
pip install -e .
```
-For users attempting to reproduce results found in the Meta-World paper please use this command:
+For users attempting to reproduce results found in the [Meta-World paper](https://arxiv.org/abs/1910.10897), please use the following command:
```
pip install git+https://github.com/Farama-Foundation/Metaworld.git@04be337a12305e393c0caf0cbf5ec7755c7c8feb
```
diff --git a/docs/introduction/basic_usage.md b/docs/introduction/basic_usage.md
index 3580e733d..b5e424707 100644
--- a/docs/introduction/basic_usage.md
+++ b/docs/introduction/basic_usage.md
@@ -24,11 +24,6 @@ For each of those environments, a task must be assigned to it using
respectively.
`Tasks` can only be assigned to environments which have a key in
`benchmark.train_classes` or `benchmark.test_classes` matching `task.env_name`.
-Please see the sections [Running ML1, MT1](#running-ml1-or-mt1) and [Running ML10, ML45, MT10, MT50](#running-a-benchmark)
-for more details.
-
-You may wish to only access individual environments used in the Metaworld benchmark for your research. See the
-[Accessing Single Goal Environments](#accessing-single-goal-environments) for more details.
### Seeding a Benchmark Instance
@@ -56,7 +51,7 @@ env.set_task(task) # Set task
-obs = env.reset()  # Reset environment
+obs, info = env.reset()  # Reset environment
a = env.action_space.sample() # Sample an action
-obs, reward, done, info = env.step(a) # Step the environment with the sampled random action
+obs, reward, terminate, truncate, info = env.step(a) # Step the environment with the sampled random action
```
__MT1__ can be run the same way except that it does not contain any `test_tasks`
@@ -80,7 +75,7 @@ for name, env_cls in ml10.train_classes.items():
for env in training_envs:
-    obs = env.reset()  # Reset environment
+    obs, info = env.reset()  # Reset environment
a = env.action_space.sample() # Sample an action
- obs, reward, done, info = env.step(a) # Step the environment with the sampled random action
+ obs, reward, terminate, truncate, info = env.step(a) # Step the environment with the sampled random action
```
Create an environment with test tasks (this only works for ML10 and ML45, since MT10 and MT50 don't have a separate set of test tasks):
```python
@@ -100,7 +95,7 @@ for name, env_cls in ml10.test_classes.items():
for env in testing_envs:
-    obs = env.reset()  # Reset environment
+    obs, info = env.reset()  # Reset environment
a = env.action_space.sample() # Sample an action
- obs, reward, done, info = env.step(a) # Step the environment with the sampled random action
+ obs, reward, terminate, truncate, info = env.step(a) # Step the environment with the sampled random action
```
## Accessing Single Goal Environments
@@ -124,7 +119,7 @@ door_open_goal_hidden_cls = ALL_V2_ENVIRONMENTS_GOAL_HIDDEN["door-open-v2-goal-h
env = door_open_goal_hidden_cls()
env.reset() # Reset environment
a = env.action_space.sample() # Sample an action
-obs, reward, done, info = env.step(a) # Step the environment with the sampled random action
+obs, reward, terminate, truncate, info = env.step(a) # Step the environment with the sampled random action
assert (obs[-3:] == np.zeros(3)).all() # goal will be zeroed out because env is HiddenGoal
# You can choose to initialize the random seed of the environment.
@@ -136,7 +131,7 @@ env1.reset() # Reset environment
env2.reset()
a1 = env1.action_space.sample() # Sample an action
a2 = env2.action_space.sample()
-next_obs1, _, _, _ = env1.step(a1) # Step the environment with the sampled random action
+next_obs1, _, _, _, _ = env1.step(a1) # Step the environment with the sampled random action
next_obs2, _, _, _ = env2.step(a2)
assert (next_obs1[-3:] == next_obs2[-3:]).all() # 2 envs initialized with the same seed will have the same goal
@@ -147,8 +142,8 @@ env1.reset() # Reset environment
env3.reset()
a1 = env1.action_space.sample() # Sample an action
a3 = env3.action_space.sample()
-next_obs1, _, _, _ = env1.step(a1) # Step the environment with the sampled random action
-next_obs3, _, _, _ = env3.step(a3)
+next_obs1, _, _, _, _ = env1.step(a1) # Step the environment with the sampled random action
+next_obs3, _, _, _, _ = env3.step(a3)
assert not (next_obs1[-3:] == next_obs3[-3:]).all() # 2 envs initialized with different seeds will have different goals
assert not (next_obs1[-3:] == np.zeros(3)).all() # The env's are goal observable, meaning the goal is not zero'd out
diff --git a/docs/rendering/rendering.md b/docs/rendering/rendering.md
new file mode 100644
index 000000000..2fb740cea
--- /dev/null
+++ b/docs/rendering/rendering.md
@@ -0,0 +1,49 @@
+# Rendering
+
+Each Meta-World environment uses Gymnasium to handle rendering, following the [`gymnasium.MujocoEnv`](https://github.com/Farama-Foundation/Gymnasium/blob/94a7909042e846c496bcf54f375a5d0963da2b31/gymnasium/envs/mujoco/mujoco_env.py#L184) interface.
+
+Upon environment creation, a user can select a render mode from `('rgb_array', 'human')`.
+
+For example:
+
+```python
+import metaworld
+import random
+
+print(metaworld.ML1.ENV_NAMES) # Check out the available environments
+
+env_name = ''  # Pick an environment name
+
+render_mode = ''  # Pick a render mode: 'rgb_array' or 'human'
+
+ml1 = metaworld.ML1(env_name) # Construct the benchmark, sampling tasks
+
+env = ml1.train_classes[env_name](render_mode=render_mode)
+task = random.choice(ml1.train_tasks)
+env.set_task(task) # Set task
+
+obs, info = env.reset()  # Reset environment
+a = env.action_space.sample() # Sample an action
+obs, reward, terminate, truncate, info = env.step(a) # Step the environment with the sampled random action
+```
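+
+With `render_mode='human'`, calling `env.render()` opens and updates an interactive viewer; with `'rgb_array'`, `env.render()` returns the current frame as a NumPy array. A minimal sketch of collecting frames, assuming the `env` above was constructed with `render_mode='rgb_array'`:
+
+```python
+frames = []
+for _ in range(100):
+    a = env.action_space.sample()
+    obs, reward, terminate, truncate, info = env.step(a)
+    frames.append(env.render())  # an RGB frame of shape (H, W, 3)
+```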
+
+## Render from a specific camera
+
+In addition to the base render functions, Meta-World supports multiple camera positions.
+
+```python
+camera_name = '' # one of: ['corner', 'corner2', 'corner3', 'topview', 'behindGripper', 'gripperPOV']
+
+env = ml1.train_classes[env_name](render_mode=render_mode, camera_name=camera_name)
+
+```
+
+The camera ID (from MuJoCo) can also be passed directly if it is known.
+
+```python
+camera_id = 0  # an integer camera ID from the MuJoCo model
+
+env = ml1.train_classes[env_name](render_mode=render_mode, camera_id=camera_id)
+```
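+
+Frames collected with `'rgb_array'` can then be written to disk with any image library, for example `imageio.mimsave('trajectory.gif', frames)` (assuming `imageio` is installed; it is not a Meta-World dependency).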
diff --git a/docs/usage/basic_usage.md b/docs/usage/basic_usage.md
new file mode 100644
index 000000000..cc2443ff9
--- /dev/null
+++ b/docs/usage/basic_usage.md
@@ -0,0 +1,36 @@
+---
+layout: "contents"
+title: Generate data with expert policies
+firstpage:
+---
+
+# Generate data with expert policies
+
+## Expert Policies
+For each individual environment in Meta-World (e.g. reach, basketball, sweep) there is an expert policy that solves the task. These policies can be used to generate expert data for imitation learning.
+
+## Using Expert Policies
+The example below provides sample code for the reach environment. It can be extended to the ML10/ML45/MT10/MT50 benchmarks if a mapping from environment names to their policies is maintained, as sketched after the example.
+
+
+```python
+from metaworld import MT1
+from metaworld.policies.sawyer_reach_v2_policy import SawyerReachV2Policy
+
+mt1 = MT1('reach-v2', seed=42)
+env = mt1.train_classes['reach-v2']()
+env.set_task(mt1.train_tasks[0])
+obs, info = env.reset()
+
+policy = SawyerReachV2Policy()
+
+done = False
+while not done:
+    a = policy.get_action(obs)
+    obs, _, _, _, info = env.step(a)
+    done = int(info['success']) == 1
+```
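+
+A sketch of that extension for MT10 follows. The `policies` mapping below is illustrative: `SawyerReachV2Policy` and `SawyerWindowOpenV2Policy` are existing classes in `metaworld.policies`, and the remaining MT10 tasks would each need their corresponding policy class added.
+
+```python
+from metaworld import MT10
+from metaworld.policies.sawyer_reach_v2_policy import SawyerReachV2Policy
+from metaworld.policies.sawyer_window_open_v2_policy import SawyerWindowOpenV2Policy
+
+policies = {
+    'reach-v2': SawyerReachV2Policy,
+    'window-open-v2': SawyerWindowOpenV2Policy,
+    # ... one entry per MT10 task
+}
+
+mt10 = MT10(seed=42)
+for name, env_cls in mt10.train_classes.items():
+    if name not in policies:
+        continue  # skip tasks without a registered policy
+    env = env_cls()
+    env.set_task([t for t in mt10.train_tasks if t.env_name == name][0])
+    obs, info = env.reset()
+    policy = policies[name]()
+    done = False
+    while not done:
+        obs, _, _, _, info = env.step(policy.get_action(obs))
+        done = int(info['success']) == 1
+```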
diff --git a/metaworld/__init__.py b/metaworld/__init__.py
index 24f7b8c76..b78036e26 100644
--- a/metaworld/__init__.py
+++ b/metaworld/__init__.py
@@ -1,40 +1,37 @@
-"""Proposal for a simple, understandable MetaWorld API."""
+"""The public-facing Metaworld API."""
+
+from __future__ import annotations
+
import abc
import pickle
from collections import OrderedDict
-from typing import List, NamedTuple, Type
+from typing import Any
import numpy as np
+import numpy.typing as npt
import metaworld.envs.mujoco.env_dict as _env_dict
-
-EnvName = str
-
-
-class Task(NamedTuple):
- """All data necessary to describe a single MDP.
-
- Should be passed into a MetaWorldEnv's set_task method.
- """
-
- env_name: EnvName
- data: bytes # Contains env parameters like random_init and *a* goal
+from metaworld.types import Task
-class MetaWorldEnv:
+class MetaWorldEnv(abc.ABC):
"""Environment that requires a task before use.
Takes no arguments to its constructor, and raises an exception if used
before `set_task` is called.
"""
+ @abc.abstractmethod
def set_task(self, task: Task) -> None:
- """Set the task.
+ """Sets the task.
- Raises:
- ValueError: If task.env_name is different from the current task.
+ Args:
+ task: The task to set.
+ Raises:
+ ValueError: If `task.env_name` is different from the current task.
"""
+ raise NotImplementedError
class Benchmark(abc.ABC):
@@ -43,83 +40,135 @@ class Benchmark(abc.ABC):
When used to evaluate an algorithm, only a single instance should be used.
"""
+ _train_classes: _env_dict.EnvDict
+ _test_classes: _env_dict.EnvDict
+ _train_tasks: list[Task]
+ _test_tasks: list[Task]
+
@abc.abstractmethod
def __init__(self):
pass
@property
- def train_classes(self) -> "OrderedDict[EnvName, Type]":
- """Get all of the environment classes used for training."""
+ def train_classes(self) -> _env_dict.EnvDict:
+ """Returns all of the environment classes used for training."""
return self._train_classes
@property
- def test_classes(self) -> "OrderedDict[EnvName, Type]":
- """Get all of the environment classes used for testing."""
+ def test_classes(self) -> _env_dict.EnvDict:
+ """Returns all of the environment classes used for testing."""
return self._test_classes
@property
- def train_tasks(self) -> List[Task]:
- """Get all of the training tasks for this benchmark."""
+ def train_tasks(self) -> list[Task]:
+ """Returns all of the training tasks for this benchmark."""
return self._train_tasks
@property
- def test_tasks(self) -> List[Task]:
- """Get all of the test tasks for this benchmark."""
+ def test_tasks(self) -> list[Task]:
+ """Returns all of the test tasks for this benchmark."""
return self._test_tasks
_ML_OVERRIDE = dict(partially_observable=True)
+"""The overrides for the Meta-Learning benchmarks. Disables the inclusion of the goal position in the observation."""
+
_MT_OVERRIDE = dict(partially_observable=False)
+"""The overrides for the Multi-Task benchmarks. Enables the inclusion of the goal position in the observation."""
_N_GOALS = 50
+"""The number of goals to generate for each environment."""
+
-def _encode_task(env_name, data):
+def _encode_task(env_name, data) -> Task:
+    """Instantiates a new `Task` object after pickling the data.
+
+    Args:
+        env_name: The name of the environment.
+        data: The task data (will be pickled).
+
+    Returns:
+        A `Task` object.
+    """
return Task(env_name=env_name, data=pickle.dumps(data))
-def _make_tasks(classes, args_kwargs, kwargs_override, seed=None):
+def _make_tasks(
+ classes: _env_dict.EnvDict,
+ args_kwargs: _env_dict.EnvArgsKwargsDict,
+ kwargs_override: dict,
+ seed: int | None = None,
+) -> list[Task]:
+ """Initialises goals for a given set of environments.
+
+ Args:
+ classes: The environment classes as an `EnvDict`.
+ args_kwargs: The environment arguments and keyword arguments.
+ kwargs_override: Any kwarg overrides.
+ seed: The random seed to use.
+
+ Returns:
+ A flat list of `Task` objects, `_N_GOALS` for each environment in `classes`.
+ """
+ # Cache existing random state
if seed is not None:
st0 = np.random.get_state()
np.random.seed(seed)
+
tasks = []
for env_name, args in args_kwargs.items():
+ kwargs = args["kwargs"].copy()
+ assert isinstance(kwargs, dict)
assert len(args["args"]) == 0
+
+ # Init env
env = classes[env_name]()
env._freeze_rand_vec = False
env._set_task_called = True
- rand_vecs = []
- kwargs = args["kwargs"].copy()
+ rand_vecs: list[npt.NDArray[Any]] = []
+
+ # Set task
del kwargs["task_id"]
env._set_task_inner(**kwargs)
- for _ in range(_N_GOALS):
+
+ for _ in range(_N_GOALS): # Generate random goals
env.reset()
+ assert env._last_rand_vec is not None
rand_vecs.append(env._last_rand_vec)
+
unique_task_rand_vecs = np.unique(np.array(rand_vecs), axis=0)
- assert unique_task_rand_vecs.shape[0] == _N_GOALS, unique_task_rand_vecs.shape[
- 0
- ]
+ assert (
+ unique_task_rand_vecs.shape[0] == _N_GOALS
+ ), f"Only generated {unique_task_rand_vecs.shape[0]} unique goals, not {_N_GOALS}"
env.close()
+
+ # Create a task for each random goal
for rand_vec in rand_vecs:
kwargs = args["kwargs"].copy()
+ assert isinstance(kwargs, dict)
del kwargs["task_id"]
+
kwargs.update(dict(rand_vec=rand_vec, env_cls=classes[env_name]))
kwargs.update(kwargs_override)
+
tasks.append(_encode_task(env_name, kwargs))
+
del env
+
+ # Restore random state
if seed is not None:
np.random.set_state(st0)
+
return tasks
-def _ml1_env_names():
- tasks = list(_env_dict.ML1_V2["train"])
- assert len(tasks) == 50
- return tasks
+# MT Benchmarks
-class ML1(Benchmark):
- ENV_NAMES = _ml1_env_names()
+class MT1(Benchmark):
+ """The MT1 benchmark. A goal-conditioned RL environment for a single Metaworld task."""
+
+ ENV_NAMES = list(_env_dict.ALL_V2_ENVIRONMENTS.keys())
def __init__(self, env_name, seed=None):
super().__init__()
@@ -127,48 +176,88 @@ def __init__(self, env_name, seed=None):
raise ValueError(f"{env_name} is not a V2 environment")
cls = _env_dict.ALL_V2_ENVIRONMENTS[env_name]
self._train_classes = OrderedDict([(env_name, cls)])
- self._test_classes = self._train_classes
- self._train_ = OrderedDict([(env_name, cls)])
+ self._test_classes = OrderedDict([(env_name, cls)])
args_kwargs = _env_dict.ML1_args_kwargs[env_name]
self._train_tasks = _make_tasks(
- self._train_classes, {env_name: args_kwargs}, _ML_OVERRIDE, seed=seed
+ self._train_classes, {env_name: args_kwargs}, _MT_OVERRIDE, seed=seed
)
- self._test_tasks = _make_tasks(
- self._test_classes,
- {env_name: args_kwargs},
- _ML_OVERRIDE,
- seed=(seed + 1 if seed is not None else seed),
+
+ self._test_tasks = []
+
+
+class MT10(Benchmark):
+ """The MT10 benchmark. Contains 10 tasks in its train set. Has an empty test set."""
+
+ def __init__(self, seed=None):
+ super().__init__()
+ self._train_classes = _env_dict.MT10_V2
+ self._test_classes = OrderedDict()
+ train_kwargs = _env_dict.MT10_V2_ARGS_KWARGS
+ self._train_tasks = _make_tasks(
+ self._train_classes, train_kwargs, _MT_OVERRIDE, seed=seed
)
+        self._test_tasks = []
+
+
+class MT50(Benchmark):
+ """The MT50 benchmark. Contains all (50) tasks in its train set. Has an empty test set."""
+
+ def __init__(self, seed=None):
+ super().__init__()
+ self._train_classes = _env_dict.MT50_V2
+ self._test_classes = OrderedDict()
+ train_kwargs = _env_dict.MT50_V2_ARGS_KWARGS
+ self._train_tasks = _make_tasks(
+ self._train_classes, train_kwargs, _MT_OVERRIDE, seed=seed
+ )
+
+        self._test_tasks = []
+
+
+# ML Benchmarks
-class MT1(Benchmark):
- ENV_NAMES = _ml1_env_names()
+
+class ML1(Benchmark):
+ """The ML1 benchmark. A meta-RL environment for a single Metaworld task. The train and test set contain different goal positions.
+ The goal position is not part of the observation."""
+
+ ENV_NAMES = list(_env_dict.ALL_V2_ENVIRONMENTS.keys())
def __init__(self, env_name, seed=None):
super().__init__()
if env_name not in _env_dict.ALL_V2_ENVIRONMENTS:
raise ValueError(f"{env_name} is not a V2 environment")
+
cls = _env_dict.ALL_V2_ENVIRONMENTS[env_name]
self._train_classes = OrderedDict([(env_name, cls)])
- self._test_classes = OrderedDict([(env_name, cls)])
+ self._test_classes = self._train_classes
args_kwargs = _env_dict.ML1_args_kwargs[env_name]
self._train_tasks = _make_tasks(
- self._train_classes, {env_name: args_kwargs}, _MT_OVERRIDE, seed=seed
+ self._train_classes, {env_name: args_kwargs}, _ML_OVERRIDE, seed=seed
+ )
+ self._test_tasks = _make_tasks(
+ self._test_classes,
+ {env_name: args_kwargs},
+ _ML_OVERRIDE,
+ seed=(seed + 1 if seed is not None else seed),
)
-
- self._test_tasks = []
class ML10(Benchmark):
+ """The ML10 benchmark. Contains 10 tasks in its train set and 5 tasks in its test set. The goal position is not part of the observation."""
+
def __init__(self, seed=None):
super().__init__()
self._train_classes = _env_dict.ML10_V2["train"]
self._test_classes = _env_dict.ML10_V2["test"]
- train_kwargs = _env_dict.ml10_train_args_kwargs
+ train_kwargs = _env_dict.ML10_ARGS_KWARGS["train"]
- test_kwargs = _env_dict.ml10_test_args_kwargs
+ test_kwargs = _env_dict.ML10_ARGS_KWARGS["test"]
self._train_tasks = _make_tasks(
self._train_classes, train_kwargs, _ML_OVERRIDE, seed=seed
)
@@ -179,12 +268,14 @@ def __init__(self, seed=None):
class ML45(Benchmark):
+ """The ML45 benchmark. Contains 45 tasks in its train set and 5 tasks in its test set (50 in total). The goal position is not part of the observation."""
+
def __init__(self, seed=None):
super().__init__()
self._train_classes = _env_dict.ML45_V2["train"]
self._test_classes = _env_dict.ML45_V2["test"]
- train_kwargs = _env_dict.ml45_train_args_kwargs
- test_kwargs = _env_dict.ml45_test_args_kwargs
+ train_kwargs = _env_dict.ML45_ARGS_KWARGS["train"]
+ test_kwargs = _env_dict.ML45_ARGS_KWARGS["test"]
self._train_tasks = _make_tasks(
self._train_classes, train_kwargs, _ML_OVERRIDE, seed=seed
@@ -194,32 +285,4 @@ def __init__(self, seed=None):
)
-class MT10(Benchmark):
- def __init__(self, seed=None):
- super().__init__()
- self._train_classes = _env_dict.MT10_V2
- self._test_classes = OrderedDict()
- train_kwargs = _env_dict.MT10_V2_ARGS_KWARGS
- self._train_tasks = _make_tasks(
- self._train_classes, train_kwargs, _MT_OVERRIDE, seed=seed
- )
-
- self._test_tasks = []
- self._test_classes = []
-
-
-class MT50(Benchmark):
- def __init__(self, seed=None):
- super().__init__()
- self._train_classes = _env_dict.MT50_V2
- self._test_classes = OrderedDict()
- train_kwargs = _env_dict.MT50_V2_ARGS_KWARGS
-
- self._train_tasks = _make_tasks(
- self._train_classes, train_kwargs, _MT_OVERRIDE, seed=seed
- )
-
- self._test_tasks = []
-
-
__all__ = ["ML1", "MT1", "ML10", "MT10", "ML45", "MT50"]
diff --git a/metaworld/envs/asset_path_utils.py b/metaworld/envs/asset_path_utils.py
index 923e05806..ccbcdb0e5 100644
--- a/metaworld/envs/asset_path_utils.py
+++ b/metaworld/envs/asset_path_utils.py
@@ -1,12 +1,34 @@
-import os
+"""Set of utilities for retrieving asset paths for the environments."""
-ENV_ASSET_DIR_V1 = os.path.join(os.path.dirname(__file__), "assets_v1")
-ENV_ASSET_DIR_V2 = os.path.join(os.path.dirname(__file__), "assets_v2")
+from __future__ import annotations
+from pathlib import Path
-def full_v1_path_for(file_name):
- return os.path.join(ENV_ASSET_DIR_V1, file_name)
+_CURRENT_FILE_DIR = Path(__file__).parent.absolute()
+ENV_ASSET_DIR_V1 = _CURRENT_FILE_DIR / "assets_v1"
+ENV_ASSET_DIR_V2 = _CURRENT_FILE_DIR / "assets_v2"
-def full_v2_path_for(file_name):
- return os.path.join(ENV_ASSET_DIR_V2, file_name)
+
+def full_v1_path_for(file_name: str) -> str:
+ """Retrieves the full, absolute path for a given V1 asset
+
+ Args:
+ file_name: Name of the asset file. Can include subdirectories.
+
+ Returns:
+ The full path to the asset file.
+ """
+ return str(ENV_ASSET_DIR_V1 / file_name)
+
+
+def full_v2_path_for(file_name: str) -> str:
+ """Retrieves the full, absolute path for a given V2 asset
+
+ Args:
+ file_name: Name of the asset file. Can include subdirectories.
+
+ Returns:
+ The full path to the asset file.
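+
+    Example:
+        ``full_v2_path_for("sawyer_xyz/sawyer_basketball.xml")`` would return the
+        absolute path to that file under ``assets_v2`` (illustrative file name).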
+ """
+ return str(ENV_ASSET_DIR_V2 / file_name)
diff --git a/metaworld/envs/assets_updated/sawyer_xyz/dm_control_pick_place.ipynb b/metaworld/envs/assets_updated/sawyer_xyz/dm_control_pick_place.ipynb
deleted file mode 100644
index 477cd2c6e..000000000
--- a/metaworld/envs/assets_updated/sawyer_xyz/dm_control_pick_place.ipynb
+++ /dev/null
@@ -1,1563 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/avnish/.local/share/virtualenvs/metaworld-7kyDgMie/lib/python3.7/site-packages/dm_control/utils/containers.py:30: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated since Python 3.3,and in 3.9 it will stop working\n",
- " class TaggedTasks(collections.Mapping):\n"
- ]
- }
- ],
- "source": [
- "#@title All `dm_control` imports required for this tutorial\n",
- "\n",
- "# The basic mujoco wrapper.\n",
- "from dm_control import mujoco\n",
- "\n",
- "# Access to enums and MuJoCo library functions.\n",
- "from dm_control.mujoco.wrapper.mjbindings import enums\n",
- "from dm_control.mujoco.wrapper.mjbindings import mjlib\n",
- "\n",
- "# PyMJCF\n",
- "from dm_control import mjcf\n",
- "\n",
- "# Composer high level imports\n",
- "from dm_control import composer\n",
- "from dm_control.composer.observation import observable\n",
- "from dm_control.composer import variation\n",
- "\n",
- "# Imports for Composer tutorial example\n",
- "from dm_control.composer.variation import distributions\n",
- "from dm_control.composer.variation import noises\n",
- "from dm_control.locomotion.arenas import floors\n",
- "\n",
- "# Control Suite\n",
- "from dm_control import suite\n",
- "\n",
- "# Run through corridor example\n",
- "from dm_control.locomotion.walkers import cmu_humanoid\n",
- "from dm_control.locomotion.arenas import corridors as corridor_arenas\n",
- "from dm_control.locomotion.tasks import corridors as corridor_tasks\n",
- "\n",
- "# Soccer\n",
- "from dm_control.locomotion import soccer\n",
- "\n",
- "# Manipulation\n",
- "from dm_control import manipulation"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/avnish/.local/share/virtualenvs/metaworld-7kyDgMie/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n",
- " and should_run_async(code)\n"
- ]
- }
- ],
- "source": [
- "#@title Other imports and helper functions\n",
- "\n",
- "# General\n",
- "import copy\n",
- "import os\n",
- "from IPython.display import clear_output\n",
- "import numpy as np\n",
- "\n",
- "# Graphics-related\n",
- "import matplotlib\n",
- "import matplotlib.animation as animation\n",
- "import matplotlib.pyplot as plt\n",
- "from IPython.display import HTML\n",
- "import PIL.Image\n",
- "\n",
- "# Use svg backend for figure rendering\n",
- "%config InlineBackend.figure_format = 'svg'\n",
- "\n",
- "# Font sizes\n",
- "SMALL_SIZE = 8\n",
- "MEDIUM_SIZE = 10\n",
- "BIGGER_SIZE = 12\n",
- "plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n",
- "plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n",
- "plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n",
- "plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n",
- "plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n",
- "plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n",
- "plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title\n",
- "\n",
- "# Inline video helper function\n",
- "if os.environ.get('COLAB_NOTEBOOK_TEST', False):\n",
- " # We skip video generation during tests, as it is quite expensive.\n",
- " display_video = lambda *args, **kwargs: None\n",
- "else:\n",
- " def display_video(frames, framerate=30):\n",
- " height, width, _ = frames[0].shape\n",
- " dpi = 70\n",
- " orig_backend = matplotlib.get_backend()\n",
- " matplotlib.use('Agg') # Switch to headless 'Agg' to inhibit figure rendering.\n",
- " fig, ax = plt.subplots(1, 1, figsize=(width / dpi, height / dpi), dpi=dpi)\n",
- " matplotlib.use(orig_backend) # Switch back to the original backend.\n",
- " ax.set_axis_off()\n",
- " ax.set_aspect('equal')\n",
- " ax.set_position([0, 0, 1, 1])\n",
- " im = ax.imshow(frames[0])\n",
- " def update(frame):\n",
- " im.set_data(frame)\n",
- " return [im]\n",
- " interval = 1000/framerate\n",
- " anim = animation.FuncAnimation(fig=fig, func=update, frames=frames,\n",
- " interval=interval, blit=True, repeat=False)\n",
- " return HTML(anim.to_html5_video())\n",
- "\n",
- "# Seed numpy's global RNG so that cell outputs are deterministic. We also try to\n",
- "# use RandomState instances that are local to a single cell wherever possible.\n",
- "np.random.seed(42)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/avnish/.local/share/virtualenvs/metaworld-7kyDgMie/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n",
- " and should_run_async(code)\n"
- ]
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUAAAADwCAIAAAD+Tyo8AAALk0lEQVR4nO3dz48jaX3H8fdTZbt7pmcGlqxgFW2iBBAXBFr2x8ywaMOJayIkLlH+gtwi5b+IQKzYc6T8AURKThFHEBKBAwcEJLtKQGIjwiLBMNmd7rZdVU8O1Y+32mW7Pd1jP3b3+6VSq6ba7X5c4099n+epKjdIkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkiRJkrR5IXcDtD8+AX8GI3gBgArehV/Dr/M260YzwFrDG/AQ/gjuQIQCIjQwhRN4DG/Dj+A3udt58xhgrfRF+Ao8D3fgEAoYQARgAjWcwgl8AI/g5/Dv8PvMTb5RBrkboB32ZfhLeA6O4BYMoIQCGgAqmMAhHMAIDuEWvAj/Ar/N2+4bxAqsJf4GXoUXYQj3Uu0tIEADEWqYwCQV4bYOP4b34Lvw89ztvxmswFrkr+EBPAd3YQijtEQIBEI7AI5FpOBsaYPdQAUP4QR+mftV3AAGWD1/BZ9L6R3BPQgwIAwCBUVRANQ0ofkwvW1BbivzBJ6DhwZ4GwywzvtjeAmeh4/CIYygOJu+CoNQDIqyKNu4VrFqaGITqWAAh1DBARzAHXgOPgs/y/1yrjsDrPNeg4/CEYQ0QVUSilAelqEMw+GwLMsylPWkpqEJTdVUcRCpIMAQjlOGD+HzBnjjitwN0C55AT4J985yezZxVZ4NcYcHw4NbB4e3D4cHw6IsymEZikAgDAPDNB86Sj81hNvw6cwv6NozwOr4HBzBNE04FxAIg9AGNTaxbuq6qkfD0XA0PDo6unv37mA4KENJDWV6kjbwJRzCJ3O+mpvALrQ6nod7MIKQju0NsYlAEQog1nF0MBoNR8MwLJpiOpiePj6ty7opmyY0DGAA9dmkF0M4hDvwQdYXda1ZgZV8LB3PA0zT5VY1NIQqxDrGOg6L4dHh0UFxcHd09/bBbSoODg5CCIFABVOYQAkNDOA0TWJrY6zA6rjbOZc7hSkMoSISYxWbpikPyvHxuLhVhBhCE0IMsYrNpKGCCmJaAozTyWHfYpvk3lVyBDU8gY/Anc69CregIE5iLGM9rWvqaT0Nw9BMmmba1OP6LMA11BDTT03SmWFtkgHeFd9LA89i5UqRhj0hzfuG3np3Y9sRjmusfKvkrT/Ax1MCRzCFEiYQaaqmHJWTDybl7bKsSiYUsRg/HtfHdagDNaEJcRqZwEl63gmMzfBmGeCd8AUo1w5wO93bzSrLY9xaHWCggdcf8RYwSedyx+nn29JaUISCyHQ6ZURFFSdxejqtTqvmpGneb87SO4Yp1DDtrGtjDPBOmCuwCwNcnn8AF9Xe2T/nQrswyQV86TF/9zPevAe34QQCVNDAiBhjKENVV4yoYlUNqkExmB5Pp5NpfVI3J+fT28AxnHA2MH5/q3vypjHAu6K8qNtcdB7DGuldFmA6X5vz2//+p7z5KvwOhjCEKTw5G9bGQazKqi7qYlBMqkmIIRDq45opsY6M4QSO4RjG8CTNgZ14DmmzDPBOeHV5gMtFGeai6K7oQtMpxcX5meMA3/43vvZVeJS+dwARRmmSeUjTNARiHduudZxGTqHupPcYpvABnMAftrYLbygDvBNe+/CCxfmqWyyK8bLe8oUBJt0yNJfkmB7/xnv887/y9Qf8YAgTuAuncAgllMRJbJ+iPYF0bqzbvTf4fRjDKby7ld13g3lD/074R7i/aLJqWYZZGd254sySxM6lN6Zst1//4QFffwMO0z1JtyDCoNMpn50urlL/+TR1nsfwPvwK/mOru/EGMsA74Z9SgFcX3lmVXhHd/vwWS+K6cJlluIbvv0hT8I2/4Ad/ni6NjDBMF0u2V2uN09L2oicpvf8HP3UAvHEGeCf859q1d1kXei6xK1wY5ipdjjVbvv+nNIFvvk4s+eGfpCLcprxKXegxZyeBT2EM78D/bnCPqWWAd8I7a5TfonOueMVwd03L6nCTgjmX4f6Wz/xtOlFUwWm6cqMdBv8P/Ncz3kVayEms/O6fT2O3zHa3hHQ2+GlL7kJzJ5nmCvLcPS4Lt9x/mx99PJXsafqQyifwHvziss3SUzLA+S0MandjG7buuSWult7+b+9meJbV2GkDvY3hBJ6k6jyFU5jAu368+1YZ4PweLOkYd7PaPcn0rKLbNXvCYnlu57714Df8sL1zeJJO/L7np7pvmwHOb2F6i85K0du4oWaE5RU4prnnWYw5hUdQw2M4Nrp5GOD8HiwpvN2SW2w4vTMLK/BcdNuV4vfwCE433CCtZIDzW1F4i97GLTSmP+Ltpnq2PBxvvjW6yBbeErpA0cvwwouit/ZfNXcoWXZ8ebit9mgFA5zfw15vuZ+WLXSeZ7qTZysa41tnF/i/kNnDNQrv9tPSr70LF2VngPMLna8LV3aWvejsDHB+Cz8iY+7qqC1/sNSyWx3mFgOcnQHOrM1AP73kSy+de4aX3XtIZ0UZGeDM7q9Mbz/MW9C/u3BFjJWXAc5vRXrJkZk1+88R7m+rSVrGAO+EucTO6l6/AG6hJcuqbr/Bym73Zzqvuf/u3e678DbguXsMNySma55nN/1WKdLLlk9trDFahxU4s24p637I6+plcy2ZW8JFTVJeBjin+5155v6Id8VM0rOdl36q44XD4J3izQz5NVBeFKEGAjSzO4HSllXd6XD+m3FxvZybLVt9AsmSu2uswJnNPgXywqXuPLI7TJ3PVQhny5ze9rnZsmaN9PYXK3BeVuCcXnuaqLTd5rNP2AkhLroseZ35rRjCsu460MS4/gGl8W8P5maAc3plvQC00SqgPp9bFt1dEHoXV8+eYS6r/fRGaEI4F+AYV2TYTnV2BjindaajIpQpuqSZ4YWhXZbhCxKbnrM7Bv4wpSHM5zbG7h9wePUZ7xI9HQOc0+ou6FnhDaHufhLVGqF9qgDH1JLuyqpucwgRYorxK89sZ+gyDHBOK7rQ3cIb0swznQrMRRlmZYDpnHbufl0d4LkqvU4PQhtlgHNa+O7vF97ifAj7uaX3YdEhBC4KcIwx9h7QpIYtC/DZSgiz6S5lZICzeXnRJHO38HbT2+1Cs6jwtgkMIcwVYVbMYIVAL8axk9Lu6at+4Z2tvAw/3vTO0hIGOKdu+TorpJ3C288w57vQzApvL7cLAzw3X8WiGM91oRdGN6b5rXYk/AUDnI8BzualToAbKNsg9brNofNPOnW44MOzwSwfBrN8vmpZjOOibvPSGKcnURYGOKfZuz+E0CwqufF8NW4fSrfPfNEM1uwXtfrzz90V2mbEWKefuiDAIcxKt7IwwNl8HmooQmB5bs9lOJXo1YldODZeEdr+LDTnTxT1B71zX1/a6G7SSgY4m+/AT2DZPQYL7Gqp8+94Z+TNDNlcm7/CeW1eyD4ywNIeM8C6KrvQGRngbHzf6+oMcDbXZuh4bV7IPjLA0h4zwLo
SBwJ5GeCcfPfrigxwTo4edUUGWFfiMSgvA6wrcRSQlwHOyXe/rsgAS3vMAOd0DQaQ1+Al7DUDLO0xA5zZXg+D97rx14MBlvaYAc7MMaSuwgDr8jz6ZGeAdXmOgbMzwJmZAV2FAc7MXqiuwgDr8jz6ZGeAdUl2/neBAc7PJOjSDHB+dkR1aQZYl+RxZxcYYGmPGeD89nQMvKfNvmYMsLTHDHB+ezqY3NNmXzMGWNpjBngn7N14cu8afF0ZYGmPGeCd4HhSl2OAdRkecXaEAdZlOAbeEQZ4J5gHXY4BlvaYAd4Jezek3LsGS5IkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZIkSZKkpf4fJ9N6IfZu2twAAAAASUVORK5CYII=\n",
- "text/plain": [
- ""
- ]
- },
- "execution_count": 3,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "#@title A static model {vertical-output: true}\n",
- "\n",
- "static_model = \"\"\"\n",
-    "(MJCF XML omitted)\n",
- "\"\"\"\n",
- "physics = mujoco.Physics.from_xml_string(static_model)\n",
- "pixels = physics.render()\n",
- "PIL.Image.fromarray(pixels)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/avnish/.local/share/virtualenvs/metaworld-7kyDgMie/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n",
- " and should_run_async(code)\n"
- ]
- }
- ],
- "source": [
- "contents = open(\"sawyer_pick_and_place.xml\").read()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/avnish/.local/share/virtualenvs/metaworld-7kyDgMie/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n",
- " and should_run_async(code)\n"
- ]
- },
- {
- "data": {
- "text/plain": [
-      "'(XML file contents omitted)'"
- ]
- },
- "execution_count": 5,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "contents"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/avnish/.local/share/virtualenvs/metaworld-7kyDgMie/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n",
- " and should_run_async(code)\n"
- ]
- },
- {
- "data": {
-      "image/png": "(base64 PNG data omitted)",
bnXaH/9n/9nt5547rfXghRdG1bnIlWefPXz55ar6wLPP2m4rIKpf+9rXnjs4mBCdY15lnjJ/8itfuf3tb1u8bVrKlOjcMBRVo2ZRPRF5/viYPvvZq9/+NgGF6JPT6ZAMEos/sob+FGlGUd7QDJXR0go8mUyYWVQgZ4ZjuBNvWMspGGaTZBAldUQpHGE89HydROjhBrQb3DUmEvmMNIWipeAW/GqjiY6qReTW6y+UxZks/oNZkF31E6ned3791s5+PCRS//FNbWp4UUUG43UgQlrFPm9t70X3nBd6tG7xhp50+ekuPqU08vtNdIrE33lgZeXOycni0sBEK9v7xkm8tHRyespuRKlYm5vIGJqNVIloAEbVApD9QMREWzt7Vy5s2jL6GtUcR002nTRRo50opPwpEWhhddfZJnPdNHLbRHzABxTAxpULur2fAd8hDYFaZ0dHhmkK2VfCTUtzvOGEPC8P1SORlWGwSsaD//w//8Jf/+tXJ5PzQxGFfve7Vte5BCwzXxomNx56aPPNN6dW9/K5z73/n/wnPI4g2mAmoJZy8sILn/xn/hkyk6cLv//xZ575wje/efTCCyZBBiJ65ZUrKysa47iORUhkQjRXXWdW5mkpXyJ66s/9uTu1/sy/+C+uMt8Zx9SMI0DAo1/60t63vz0QczbAqQePwrU1Q9d1jrYyD0DtEscaNUsh9f1korgiVQE6ZUOqV69dDZfStJ9C1cfkWnVa1/fTrFELjktcUwjSGBbdyDnMBwCH2zf3br2X8DulFhcTRbBZI+J93qLTYYX2bHTGcUqbtLFr936dj41uuF0tF9uAX0pGveLq/hoPWgTX3uqQ51tXxfHt7Q9KeXBtbVQ9ODmh5GHVQ5FzzADePjysqo9PJvaEM1GymaUlwgQqAfj41p2lJz8O6I3bt1Vx5fymz5RuQqdH1CKeeibSTtYlPebnbTaBQKDSX6MHADQh0rAp0MkmqIp1/6te+/jly/vHG7uHiHJ6SVXRYyoY+Bzz8PTT9cUX7c3955/f/853Tr77XRVZf+aZ3ZdfXhch1UvPPDN94YVLTz/9wWOP3Xr+eVVde+GFj08m9//pP33w/POHzz+PyGbf+o//47PbBgCsAquRpg1cAMB+rRW4aM4pkXoATCqw9vTTL/xr/9r9P/mTP/Xv/DuTUpiZCo+Kp7/6VfuuQK79/W8OBBAXUckMsHGsSLUajGEyWPxZRStqY0UFIhKIKD92576jfgoFmordprbkGBonA8qhRG0NUPXEtJE5k4iUwhrNakDcTNAzaFwbdeOVb/vj7N/4hUDVaSV5JUJnivOrUwC3dw98DS7B7DGN2hc0LlqI0gT8WOdQ78ZChYzjwnn29N6e1gnthd8XPudig6PPq3uZir9zdLSxtETAlbW1vZMTZb5zcrLqpaZ28IjMPNL9KMNQVadEmdfJIa72r0hNQ/6DrTuXL2xQ2AraWzKhBpJNqEV0A4l37/uurZpfGkYagVCAuSFZlZjnqlMzxIZhIBqAm6enx6onl89fPq3mrJpBsfDwQFpoZF0hOowg7slLL61/7nOzl14i4Og737m4vHzuS1/cf+47UC3M8tJLl1Uf++pXFTh+4QWo3vy5n3NdQiREa3GjOhYrc3KnSbREHgaSWh/e2KA/8Sfe/sY3Hnz3XQDzWnUcSfXL//a//bV/5V+ZMD/+xS+WwsVKnkF5LzeAR7/y5SFKbYEAYGkxSwkNA7PNiVelmDdh5qVFejkCSJTrS7muCiKBWcFOK6J6/do1oiietGH28Gc25eMuk7pMiMkJ9o7fOQiFaJJy0keuiogg0j1TwqOivNwGAFGhYeCo57NBHUGSectXnDv1DCYienlzbWvviJiaka8q4wgbOmFxcp8ZYvjpa7KCIReOvfMV0aR1alQE0Wsns4hoCL/+aD4n4NbhoQDzcWSi88w5PGgpqqnstbK2tntwUGsd1tdvbW9fGgYKPkGcgahqbRMxAdze3rPhsu3NXlZq3nsKhYIXp536Vbp9rMHcomKhKxVc2li5tb1vW7RgXd1cHbd2W6U0EZiXV1fXlpaWl5YK86bqJ+a4OAz1ypWToyMRGUevdp6fnhpZcs5wjdfpd79bhsGOpgzDnaPDy6urqmo584PvPL+8srL2zDNsTVTiY1jXft/vu/b1r5+bTm8dH69Pp/evrtZPfpK/9z0AXEodx8nSklWJ2+GUYRjn8+XV1TqfW+IawMsffPC5hx6qtf7dv/SXVpg/8cAD43yutYK5zuff/Ff/1Tvf/vYH3/zmE3/6T4so2dO01e2bFzPAWcBHK1kMrdZqxYxlKAAhiQ82CEDsneIFKi1ShaRrOx1SQmFXwKoxNkvjMgsmcl/HKpGIkglAC9dtaLSemffr3RSdIZnq0bC2+8E72+9HOQ4REYMKlxZzMhcANtp27K/wsuKxbJrt7w5yS0G90kVU9dbOPnG5snnu9u6eeRSXN9eT8VQETNac6TSb8xyS8t1dNq7sK6LTxkb3lWa0nlFl2XXU9KEqAQ9NJtu17tZ6aRgArNsw8djO8dHRlGhUffu+c7K1lU+T4N5lor1me2hKMR8GFMAMkW1BERGxDdoQI5XRpSHF0VFMcIYLLhsxo1XOrS5t7R46FXIRkOWBLTJEk8na2tqlS5fWl5dLKctLS2urq5NhGOfz09NTunhxHMfT01Mrwz49Prbyw+l0mqNt7LW1v39uOrUCjNXVVTo+WrdZXzbS2bbwxhtqJSLhBorIJx98kIgenU6JeRxH3LqlFy5Ip/A1IwVEqrq8unp8cqJAmUxspwXY2d9X1YeXlydEO7u7Y61qtxGp3vn615noFMigALWUg3GiAhgmQxR8KgAwk13nx2DvGezcO6dI7ad1NQIzvIhKxmwhEGr4ItDVa9f6eI/lY9OJ9XMkEyjUKCLStFZoWeLihW5ZC/a/ql577jfns1MX7y6k+gyLNl7ygXkNQXf29jp+8T6RFiFOG1HVQgNQvbWzd2FjVURu7+zPbVBbvohskEBTmPcyjuNcmjiKDwbP5HebxdHs05PtPaYurQQAuG8YQGQj0Jfi/QenUzNsTkSMmZl5Ctx39T3NqD4gUTuxJzKK1DoGTlwzW29DuhhWNWuHpRDUEKwuBt3Cp+4d96Kk809COtpGFVa6Ma/juHRudV91pjpVrUQF2D45mezunp6crK+sQOTg4ODc+vowDLa7qrqyulprrSLLTz8tr79u52LTSDhmdM5Kubi6anYwrywDWN/cpH7cgnrfpdQKZhu2OEwmtoOT2SwtpzqOzW7UVgw11ro0DKfjeHJ8bF3BzHxYK1R3Dg6YCLWexoQKk5tWemHp90mZWAGFqg6mULvX4BrVWdGuP1SNHj2OwfPh3amyXyRBMekK8AsWFH7rscC2ECK76Qp1uy8sRbetTSxzXBQYlVj2BFuY1RKrDa/jGAzaL0x92UbsB1s3yjB4bYkJnKx1DVUKlKZOEXYJZcNh3JRiu+Viar/WKtXL0Tgu8hCpt7Z3jQk1UBp7JJHa55qSfW0tlzY3tnYP0jvyv/gEE108sKzATQP
XBZdWubC6enpyAmCZ+VTEQiN2EuulWBbx0vLyhaWlcRxLKevDYFJtYCaiCXyGK0Rm8/lcdQROVZeJTs+vW5ofMc+ALI5ditvYokwAhNzaAnyOp0aML53uJv2aZCcXnu5JwDsfmdt1i2sXN252BZgfTEt9+Mrtt2+d39l5dHNzS3V5Ot3Z2VHV5emUmK1X3ABMtrc1siSJf5vpdePwUDc357PZWOv5nd0BdGdnJzE+tzFgFiOIcSLOlsGuVWRpGE7nc2sHsC9K9Bba+akJokzTqm6N44VSjrsRYv4ny3IRjaonIpeffXYymehcpYrNCRuGiXmgxjKDPdSbVFR9EIJoWSrMPm4z5lYT5SfDhUVTSLZWUkK7SGaB9HD9zTejdqqdnP2Q/v0oojFlPg1+IKrZFN4xYXSR/m/X7yMquzfeQdSjWqHMQGAeomHaJYYoaq1SR7hj7Rrs4uYagK2dfbRraJSqZCAVqYbSW20CG7d2di9srDGRTVHe2t1H82u17doa8kVvb+8iuhI07DTqrobqXn4nYLMk4sO8tX/t6Gg5hrBuhrNHkTWxhz20vn5yfDwpfq9WYR6GoTBb9ZjNYRuIDnZ3R2CmKsB+rcPmWk72pIhKWh1jbkqJCi2k7tPWsX98nHwgj5ysQvh2Y7IpaOPK+fWt3UOEWtt87MHZ9RuViIHh4rnjt24u7x+fAG/qbPPgdHJ8bLKTu3mMRhOTYdibzdaGIdVpsqKUcnNry/TfQ5/9LH3tax9sbbVVev2zH4VNIMgKLWOHGvvVOL6kCumOcB63DgA4UWXVWfBtDS1mf52rVpFD1aNa/9Rf+AsgKlxUlBR1rKUMYX+Bsr3EJHoNp38ymVg/NCEDJ3Zw0Zyd+tHmP7NKNhIuziux4dIcDU/NQOz2Rt39CdaEQH6zjm8qH+gPMVcZeb0ciLJrXFXx/ve+PV1dNavXrlbsLH2kJmTff0QFgh8AiMqD05XbO3sRX2p/coptrrcfW/YhQWE29JVzmwCOahBAU0VNz+NDXhrKyvwH+/zl8xuIJlWE4BCg3to5ePWd1WGAyBLRkKUmoXDMhB5V9w8O7ltdBTCdTm1M+6SUlel0ZXV1HMfZfF5FTsbROpCsTPJU9dInHmpWU9DZdCUuCm6RCPXTj8msFCXuNmA5p62asRRdqZ2tLWrFzhS5jKNqwQhAdWty60R1ENlkfvL2wcv3nR+HQS6cu1nHT20fnIxjWulO4mmf1ArVg1N3qXqThscxb7EYVW4+//zKbNZOJY3N+BkAonUnKCOlM/JsJIiD4k2xAUZEAD6Yzy8Pw6nfPEwSvj2AUdXe36v1f/MX/z2buUKFWDwoU8eRzYAnUo2plOKFLJLGsy8o54MSkNOcnDZ8tJXR8FCK2QpRDKPo4tKF+Pr16wu2YJvZ28onYaDDQkiOrFLzgZH6U1VlYoqddE/Gyd72ZGk5XcogEo+UeRVFGLth7mr7V9Uu+H3wysXbOweN+ZsCduu3C+y0M8+f7uwfXLlwzpDZC5Dul+7OqwX7maiPSauo6pUL5wjAwAtACAU4vb3Nl85NROoHdxAUDHiHLYKBh+n0pNbZOK6vrQ2TyVDK0mSyNAxra2urq6vjfH50fDyKbN+5M6s1KWm4tFlK3hPv9heA3ePT7jxx34VNALfu7ISViJ43F/5zt2lBRMCVi+fbuYRev//i+Vvbu8R85eKFYWn6Xhl2Xrl28/T0PPPn6ua7n32ySr1wa3tfZEIeAaWIxqem0nHMYtJckRvS0Slp9r5EX6QfcSzV2jZ6qzJ1m3YP1Pik7TEbNqIaH6JqXdYnppcAc3dr3IYzUz0R+dE//+f/kZ/6854iBSzvaqBqrVanbYQzSKQHosNcLQwgPmOiDbjQhXPw7iWNOwHDu3BV09fp2j3D0hXp5CnmGXfazZeSx6zujfobNeqow62yi53CXoMebH2Qfwpkc88ygXIXH0QlacsUQucbyH0XN29t79qYOt9gzJeIZ2aqKZ7Rbej29n7sQfPv5AqWInAXpNew1xjaY/wxzRQLrO4/TE/GK5PV77z55kpcGhIro1zK5cnkoeXlcT4vD90/OTxdXlmxSRbTpaWVlZX19fWT4+OT2ez45GTn4KACM5ERuDgMR5953C95lipxYRd1msd+ubW9C+L7r1wCcPPOLiyrL9aMxESMGPVHochNgAJmfeGD21vpnlDICyIQMxQ372wD+uDTT+zdurO/tfvi0dGd6+88uXt4e39/No7S3R2D4JYCLD1wWa9cmL3w2jzGDPRUMFOdEs1EFLj/i188ENl9/vllu0UgTjLpNhnVfm3M3AkkPfNv9EvOOhU9U2XgsNaqOgJp7ByK/OGf/um5yB/96Z+2TWjXwMjEYM/vzmazYRioFI7rRcPnFo0mIkomcarVuA8iJM9CZZNLq3aNAzqjVVWvWe6XktS75L7ZXhK6vDOwbRVxWavnfxfWlkOnpeba3n3pG/P5fFGALppNfaAWoUxbF28I3fjy5voaVG/v7LXnRJlgKEsKMnaGjPbHVKEhM9o7/p8uoxJLC4cK/bdUj20sITP6si4oQfd2duf7x6l/+p0lnZ1bWjo8OZkOw9L2/mRtjW0SgtWTM5+enh4cHe0fHt7Z3T08PT0axxPVmcitzbWN9eXT4+NAja+/2WFtywD0zcMDQC+fX9/a3gcRqM24iDB+Rv4XbmZoyPRhadWx1BXo2H8e/+HPv/p3nzu9s3dycnLr5s0VZgGWiJZUAZRQehbvWLtxG3W8MZ8vpcGYmFGdqeaIgh/9M3/m3/2pn3pyOj2N6eJpmzVcd9yqQAU4/tXFj9mW7evVhgSpAjCOncYNcieqp6p//Kd+agb88Z/+6S7Gh2aahaJhLqJKalleLkRgHmC531oVMZokcr/Bb3YXsFgJiKG5dMzZv4wtW5mULYPAlERKC9QHP1b0CeU218WukvMBIqUUF7K5NvUejBAuBOCDN18L2e1WcxgNHR2JUCyrHWqvuKlHoEKwOujW3lEyKS2KBN8HcHFj5bY36McizvA8FuzpZjbH3w2mhcQ5glKqOtaohlXNthN752Q+2z08nBjS0jYJblYbVWXBBeZhMgHRZBiISGo9PjmRWndOT/cODvYODrYODo7HcSZyKnIssvzwlfnsxKE2qgKiIr3VLncIef/mnYvn1rb2DnIlif7YdegGN6/cELGdWs2e0YxLP9U8UwUe/vJTB3f2br/x3s3t/WVmM56LRdTD9zDO3AZm73xwIl1CIYSaV+8DAB790pdOVZeYb3Zjxo2c5tqCYUnVsGFXIRGMzgmoqkxU4/MKPPylLz3x5S+rp+XkU1/5yrVvfpOJR9U/+KM/uvr0042Cwh/TyMMYAUiNua42GoTUKzqgCh1EbDpNJbJp62QpgYwwZOtRiTZXi0jlgYqqV003GwYiArv3yeq0/MLQ0K5JwKqpvdVtP7LLx/wsrSTbDXKo12A2wtDwjW1tex+8gxhBjE4upsYmW2hp+QlKvEmsgoMeyb+EgQC+7+I5qN7ePYAHTQO+agb67uwdlv7Wn7O2c/vh4sbq1t5hRt
dTmdv1fyHtGi1RqgTNHdkQSyqXNy+MdLS/j64KMsENRCulWKfP1rmVy1wImI9jHUe76nF/f//45OTw6Ojg9PRoPj8RORI5Eqmba5Pz6xmGTWVLBE8TCBR9DUzb6dbu/uXzG1u7B52B2cQ92wVS9ip2I5Hjn4JYzKcLoM0us59XNtc+8aVPAjjc3j/Y3i9EO3f2CVidDElbAJ2/srl56dz5ENP3PfLk53/0j6UhbKRobP83/uJfXP/CFz7/la9IX9JrPfCqTeR0evjpr37VTJHSZI1K3MtniHj6K18BGpUS09O/54dsD/You4yGWhxHw/BUiFonLyILY2wgVYTEKGAQlRp1SESkAqhIjKILO7GL3KRr1tEiU7uOScKDjWHRdO3a1bS9+uJk8uSsl166d9S1KKpdMixq9EJ24XIgC94w5D6ore3tF7+OUiDiHNVpewncA7VTBUR2rVXUoiW9qYrUGv5C48mL62tbewf+3fgWkJrej8H4M383595+EAUxboeCImS6NwSgqF/L1D+xLSwDEkJgBYbL57Zv75LIUsQd+teEea5aalXm+4/msykz0fHxMVTrOI7zOYDjk5OTcTyu9dT6B0WORDYef9DNzqBOm15OPqPGEFDaumN5IiJSb9zevnJ+/c7+McULHQ/b+ffs7X5VeLDaEEO0CKCPpa1fPLd+8RwB9z3uD7l8bh3hanHwhtUXfearX/3MV75SYtxajcFsxPR/+NmftSIthY7jaHf3WRu883l4l4r2v6Qi7q6pEpGxVuYsE5Dc9UDDGdMtcWu/+TQ4VWK2G0iaS6tkSnFe51CM85FAwzgfjdlsQLTbZrWqagFTFCFbKCvlehp7UXfld42jEzZeCBof9vFOZcHFTe7VzH+G5gMgVSTmLpnOd9NCW++ihpNghHvr7TdCOzbkpiPVPLZUcArUUdJUS+e11c25XdAEj+rGyuTO3kHPVxpkR0QXN1aJcGNrJ8iyo1D7CnThWvEFky0euHDISsDs1NLynmgNY4VAkPl8UJ1EjQ0yAEBUYkykHcfBbKbz+cGV85vH43w+t37vcRxnInORo1qPRfZFjkSGh6/w2tI4n2U8g2JdHRNikQHbhsyluHFn99LG2p39o8bBULX7ACrB0213hYHin2T09GuaSMsYRPwn//jB1vbFzTVqSrO5O9/+lf/f53/kj7YFk9+2aVadjZg1ah91RCREyeZDiJCV8xCDkPYCggh7b50pmn86WIgYsrGSz8QJzVcjpJfqzR4nIjZWnQvbvYrMPI4jM4vIoH4TZ6HonzRlqKpShQppVcBSqeAOmeY598WfuUqK8B0DV69e1W78ono5uIaX2YjVbhvuhe2CLEh/Q63QKWuxNOl498Y7ri2biPZUqgfJkgxsdK3f2UbueDhEY0Wb0AEiKHGocqs0AgGXNlYVuLN/BOiF9dWtvUO12Rr7R7ezrAq9y0+d8QWbmNPRLEL33PWyNalK1WY9w7dnttrhrZ2Trb0V5mlcq+tPdF5ypBGwTATVd4ocbG+fGwY7QQFOaz0WsQ5+4+H7Hr5ieYkm6xZ9gfi3c97abjrBpqIqfq3jAqM1DXaGftKFSSNQu31HgTx1S3CQZijZgMGOKBdMyOf+zi88+wf+sc5kdVIH531sZLGgkFDOpUhdSmJmfzpzvpeQT5YJX6gMT3UFhLvq06DCJyGqPthIO4va/q1jNS4opVSpZhS4Sud02HpfBebgAHNRqIW/oEh7Ue0KDFXNwhS/EDhQSqQ2MdAcHq+7RK3Vsj6mB71RNtu2i3vqSQkfujwRYbZ7C7iwgqG4/txvFrBGLZA3BrgNnJPikYsBkNNFghIMbUJJFwRQ+LjpOIXUurC+emf/aPvgKC9ouHRuzS5SIqJLm2s5B6vTV51N7efnGmUhzuWnnrRJodbyj+2TSxc2QDdtipUu/sn0cLLxWGsFHnznDohm46iAiMxVZ6rHqici+yKHIo/+8NPHrTgsofrJmJFCyInBfn7NYuk88J3Dk8ub61Dd2j9C24Jz8sWNNWQ3ItGd/aOLG6tGmvYzgO2D48vn17f3jhIvITKa+jCyu7S5CiDuRDEcO99ktP/mm28kb2ich6oC3v1u5DqZTGxcc45/Sv1hlximZc4oZkmmkWHEbQixqkTNSfehsW16ERNpFzHxktsAlMtD/MzMxSsRYfMGBgkRC9ht6L7nUAuuyVSlcI758kZCYs5rhEFkYWJnA1UB3nzrreIFHn4rmqpypBYWf83YvS/XY+NpbPvygHYhqIWXzNDS/ZvvH+5sDdNVeMALvfF19pWiSBbYyX4iiEajRbCCjeePsQWaWMHGuQvEdGtnH9CNc+tQ2di8sLV3AKKjKtOV1SSxLlndjiR5tF9Hp3dSyNP6ufXQ6Z5dpLCLjt7d3q/1/DD4+20rrnw4BphZVlNiAaZ+rXjgVMR4+PJnH7/86McRpoilsnu5sCBLg8o+rLLs0ua6TfBdXl2PMzGeB6BHko9TANOVtcPRkbKytnEsUMV0Zf1ghmF5BcClc2vGGFu7h5fPryOuz/UyODcdkWKnW48nlbdv3aAWPVYiKsxit+2KGMuhG/NoRspgM+6YF8jP6CuUm11FYPqpFG5lTnHEzgtu3omIcCleo6aqCu4KChupMpnXKHFpGSIpM47jACK7VTlo3QYIteQ1xTUJvSciMUYn3oE9xHhZMpQFFW3D3OHSLnxUr9RNdSmjZnICEVFAKaUMAzdCpz637q4OiEuZLq8kKRjRW89Us/BMH3tlZYC2x2TgsGcnOEOEueZnRwufBRQPPbDcSJvoobU1u95h8UVxfKY9Enp7WmecAnm/uAKAjfO2D3aNTQAwGSYrpcASBJ0BVTr5RSFqW7hTtQKnIjPgWPVABMBw/6VHf88zlBYh4ePL61YSngBtjbbBK+fPaToKiyyjgfOPr6wmrvIyqW7XZ42GtJCtI6KZkmKIA6AfW16DirGut8O6iAz96l3H7kT0x/Xy1/7Op776Iy144RyvNp0zpx20Q7PWbn9z4XJgjfGpGXm1FIJCK9W8ZdTt56BHAcAgJb8wRF0S2jrTrnAeUbe3a61VlRY92UFF6jgXH2q3oI4az1g4xPpIO+vcVKgxv4hYRCLbMN56660IUbkjQV3bkLFsaBdqp2YgRO3eHSPXMWrEs+hKPN/gslBV3//et2P8lSPUmxN8LANAGUlLiB7kBAiDjUF0SmWidMUygoUPeyXbdLLivgvnQXR7ezdJN+ipu0YpvQU3Rs/kn4Q8WIXLF86lEddZ9+b00cGNrSRR+1ONjyq8fl1VZ7WWOD8FKlBFRtWZ6pHIEhERXfjip5srFPshjVtnHLkM0P2XLsYiwm4KC9eRgkRGfHHR+F/Q5Q2a2u06AMx0aOhiAlAQ5N30Ry9RTTaTlrvPy09n693rs2d+yGmwShtEYTLDQbGIWAdiJbJoMDPVSOcgM8kdEXJcaI4mSFREQFSY6zjmnzI0bQ+Q8KCcTbR5gY1M4lBq5ENFZLCmhaxeDnGS/OkTiGxRCTJ9g7g8oPNtYlyznat3A0WJXx6hNoluCaSUp
QSQjaQhZgsnoLkEjq8SazARd7x9a+/mu3FOEbWozStzA8u5MYiqD35auWb+iZADlTQD/YsN620zfYBtgVhw8dzKre29K5vnbu3s9g6wqwtKO8yPqzm9hnzfMtJuVwB+ZyopWbmDUnrG8Yi+SiHluo16qwFpFJnbvQcih7Ueilx65qkH77+oMm84CPZ048U4RMXHijmFLCrnBGqaJIWyGXN+R2TEvjJ0GC5RKl7yRqVU/mE05QmF9DPfoNsrArftuDotp7ffub79/tsXH3xYVLKiwQCAiH3AO1mYt9ZqXFpKEZHuzKlEMDkwSgLmPvQVe0tDMOmDw1w1LJG6ToroENntmEKtpI8AjdLmUKY0DJOJ2eKc1VJx7OKS2tAUsTj1nBiSOYBxHKnwwCVse71+/fqwtKSdXLC+g7Q9gojbj/GCKpjIJkuXUrgUu7qlrdB304Tw2994QbKKM8gnsKgAmd+elmBEXFoKKo8FiXXzdvIeYK8BjAX0VXZ3ZQYptQLRhfWVG1tZ5dtpp3ubkZ2U6T48+pS8kGR+onlYSYKkwDwGSiZH2ZoFmFvkMQoJT1WPRI5FTlQnl89/7OlH57OFFoWGk3iUC2xA3KbwptdmRMWSNENMbVtmNHF+Go0BLdsfIafArLcWwwoh83SEXEuHRermumieWkodt8PD0QRUsH3j7QceeTI3uECTPjIRRDSZTGx2vOsSQGrN0F3Y7NA+/Ng/0r5o45/StKx1GAqTzeeRlF6NWdiV8Fmi6FcYdDhwYWK7JNaLMWD+JwBVZubC+SBrr/W2QRMGhVQwYbYFzOej0X0Cd9tKFdkC4Pq52R49DlVRa53XSgCXwnbnzVCsqpuyYzHoI6wAyjBjR7odq9uImJx6kOZUN7XHwbtjGvaIKYY8niSC9vkgnxz/ZSZ3m0CNS5sbWzv7HYOni7N48Gn+oEGwp926s3P5/AbCJ9GFCJvziW1j7C4HbNvKfhrVOQDVqnpqsSvVOTAhuvz0IzKOVM6aKhqMaXQkjti0EJ2rpFtQJw/DYA9nsOZpIBm5ZXRkkZvicQBIqDtQQCExebrjeUpbSHqxuBjgB7G+8lu/8qnf82OtnzEM12YQ2QGRX7nGAJjLMHBhjerAbG6XKJq2y3eTKmpEggwKMdFARYpTmMpYa/NXQidJbeI4Myf95voUlE0wI+VCrpe88gbqN/2CCEE2RHZnpyipzSoiIiWVapkvJ9/r168TUdy4EvyU/Z89upPGg9JUdax+FVOKMbRrxvszJQWI6WDrxv6t9xRo2aFUj9yOB+DS4LTwSL4TGt0PNSiDF6K2VcQHaHN+ClbTGLecO4NFEM5k+cWNla3dQ+RRpReeu+qN9yQgP0gAuLW9ZxeLq6Te8+Uf3NpZZ9aoxe2tSY1HGQ8TMDCb7rW7f2cihyIXPvvYxYfup3R0AOQg7JQGIXqR5OMpCWNjjYBN+1DuyZs2XGh5IaZ7fm5np6jIE05GalZ6aPgwLIA2mv1uzgfijMhlSVpYqnfeu37lE487DbYTXaBGLkxjBPBFUKtPno+RaaYNmTg6blCINORstv5DpHAhUOECtoBZtUhyeqlEBBZ01Qh2GpRbbitMDGAwTdoODaDIvnoZWlCSW7d2kMRM7FlcgV0SJ7UGEI+bNN4FOOLsSVsuVQAiSlvCZva5tcbtgmYrkG6LJK+JZqLr3/nafD7qwr7ig1kJ0yKvADQ8hRQKvRp2bY0wjYn8PqE0lUgBbXEidTxzt4LO/gmNfm51effwaD7WXEezTlPIxlIWMiDxev/2zub66qRweyxweGtnaPZGUFX+DKBjaQVmtdZoajtVPVY9Fvn0px6enc6iMTI/HtK2MW6+ziDawuoSf0gu1/QEUpYgnrvwuHuw38I7LWG1yOWx3LDkgCYotInJBakJgPD+9Tfu+8QThUv7cqzDCNKMhhjVKmIFHvaItHIBQJkIhdXb95VAfVMYKXIIah0r+oAnRUWPU9diPA4Im5UXqSHcONGB0pm2tahaU2FSQAa100o1xhYVu4JObV6kt/7TtWvXLKuWvpr7rqAoJW26xV6OLAvQ1Na7P2RAr52Qr0ozPwosX7jyzrXXupMO6r8HC+RHfide/0CP2Ts8/oeBubt/YFN18rX/3q3Sk//iDB2E4jXOrqqj6syu7bSbU1TPPfnQ9sE9L1777b5+hxD7g77uoXIX/nBWorc3CZjPI80R0SYz4tKk14iqFOZaa621DGXCE/uWBtOZwJCsWVKA4paiCGX1lG+ljYaqFJhMJKF4Fb0v5lYld3OCKP8OK4MwE4igqnZ7qFiEzUdhwM1pzkoGY2wJXQEmEvu+hOSKCAbH5Dc040U10Ri5KDfCwq0nInPOfavdIqFWdx3XwwHn73+oG6iEJKPmn97z9T8wsSVY/UjAH0qT3RMWlz7f3h8+9JOusAznNrpljPDVqDpXnVxYv/jEQ/joVX30cv5He/Wg6UP+oL1xBYL1vijw4GNPWpFCKUXDSY92Ve1dFGLS0bWLVSanW+JOok+tFA3fT707QKHgwlFl4g9X7yuCBZQ98OuxDM0lmpNmmrIMRYOb7fI9C0gNOe/bOInNXLR6tC6ECPdU/B1xg0kJ0SdNJCLXr10LLEXxB5FCzemvEhksAFl/ohGSEW3VWsy9X9FOQ8xJ9ViW/fXig58gUGf+pV3qJ3hPc/QHYZX/Xl73ctX8L9//uwBwZ++wV8JX5thdNEGpw4Lfl2n+hOrY/d9MtZqw7/sf/4d7fSTn/8OIhXsqXgBdacPHHv3kJ574dK21JVDDX7CglErGrRUKSynN5yPzfImWjH8z0mLXAZWo2K/B1f404xWOpldFhODcSy3MNa4mzUyQGZilFKs+NgnSL1KhEB2sQS/NUVUV62mOULV7DJ6Sy+m+/hdjOfucXVlpxq3d2mRBbGvvkC5SSmkPyCJ/Ro0o+sl4HllooQhzUSyfaX02Tz77w28891vRgnf2OFXpH4we6MwP/9A8f3v3gDtuuby5Ht3/Hwb53n9kLvbFcfeAWZYXv2M8qfGzvVljNMQMGEP9iursgQf3TuqF1aV/uJ3d83WXWv/Bj+HDFOpv49V7k2d5mpiHyWCpXauJ81i2t9xx+q1GeMnqs9kMvbgM3vHgs6r5gNkBRqH9agwVCGaL2HCUeWSGQWOwBXsHA1uDsnTDpAy2kgzZfZ6lIYYwCuYiIh4Gm4kuIjlW0wfNxTVFqrh+/c2MNVJM5vHJruptD9QVYLgt0eIOPhgkR0+aVpdaSyklEu6LLA+1+/UUd41oaR8jWvz9d/L121UjfvDGurd3D3FvNv4+j729sw9g73h+cDD/wsaF/YMThNalzmZGZ6fY/DSbqzwCFmmYb2zI5Qfem9W90+OHL6z84Nv+wV739D8/9KP3kr4AkKNh7vmVj3zqh8L+6j/6463uUDXHlUhMIz5DnNkJbLantcclcQ6l2Idr17wlqhrayDqJTBFZN5IVkIw20tjDQ6xQUndGNYx5T6S4xlOyvlrxMXFDesZAq2G2wDcABYZhQK0ILzxlDzFp9exBDpoDkUQOWasCFa6o
W2FKOmbksS0PBdQqfpl4GbIejZlLYYCaNW2IS3GjqsAnPvfl8w88xMw3333z7Re+TlyIB1VrJ/YkEIjm8/rGux8cHJ2C8PgnP/VHfuJPtHCohfUUHGF7BV7/9t+7c+MdADe2dt65uQ3oubWVhx+8760bt/cPj/7gH/uJpz7zdH44I5yBThVtwQx7dz5WZp5OlyzWYPhcGsq8Vo0rDM3kMWTaUSHDnuJz9UX1N3/jN1/97qsXV88dfPBBo/UW84t3rGwjMucVqDZkHFBgNo7HBwcrKyvjyoV64cpP/MRPpMYupXBhiou243zx3rvv6Uev0/03aODzwQc/Fke/mCtSqOp3X3juF//Gf3Hx/LnV5SlU33r/AyJ++rGHVpeWAJz/2CPP/OgfsY9KZv+l5XVLKa+/8vJ//fN/FdCloVw+t746nRBbk4DFUJnACpnPxy//I//YJ554CsB9n3h8Pp8D6r0E4ZqG4auNOAGoWueAjGL9BvbXwmydupKqpVOj1qvnaQugiFjTrn/erhotQ1Wxvj3DZPb21VpVciqGdwIaY3qGVRXA4CZrN54GZiezddW6CG91TkRWUd0xMwEw71c0w87JFh6uOmOOZkYOrp6riN/G0YYB5OBXhENOiNxEPCoo6/z9DxHR+fsfevnrv7a+MmWKj6uNw0MVOZ3PPDRGVOt44YGPdyt13utMlPaE3YOjO3sHqnLfhc35WMda7+wf3tk9uPTgw4vrRFunD+vthpA0V6DJDa8u81PCGR9eVWsViqrvXOcv/dIvvfLG9fPnz69dvXq8vb0cCdr+SkGNn8XFqY6hhPOvx8vL+/v7IrK2tvby69cvfOO5P/kn/2SwppuGFHFKW+tJ5Xuu08oMO4HY8HmqBOCxxx9PhxOBKFW9s7N3cHy6uSG16sHR4d7hMTO/dWPrC598mIiWliYPPvy4yXuNicXa8Aki+mv/0V/ZPzxW1XOr02IRHrtN0Y5SRMknB//wH/njNu8mVk/O4iLIAnvmLO/MdQLWgI5aq91rMQyDFxcixlqoMw+BSMDarjExwS61UjcxLyxpwEvpKddANoRSEZklgsB8Zc06WWYVYVeJ8f8mGDiKkNNh08Zp4pf0wibj+Wgj+zIRDaVwFptlcXPHFRrnGyysCsS1b2SC/26uSG2riBm5PsKaS067U1XVL/zQj4qIjHOpYxgSXhI+KWU6GeyBb119vfDiOl3mNu7du/0Bl2Gsenv3wM773NqyimysrljG++w6kbJ8gQIoOjlL5+u4y5TVpzEdvhMcgDVjla54j+iXf/mXv/Zbv3Xu3LmTkxPZ3f3Y+nqiUcNvczzHImrHtJL9Lqr7zMfHx6enp7XWyWTy8z//87bOoQxMjIjA+DqrN3gSYvPdOpm5tOkvjlTukHnt6tWghHb0AP7b/+rnAaxMpwpdXlryjzAr8TCZ7HzwTmo0y2G6j0ZUbM4h8WvffdG0zHRSCFCpqpYEUiDsGqIrDz+pqszExYKkVplQmIhLDoFwyzmXisiil1ImkwFQ84eTkhPpEdZp0g8hEUrgphGymAEuUgWqHL26FGdHvs5iY/FtVBqXwuEVqU/AbrhUFfUiSpekFFQVcaRQwmbZko/BIyZ67PHHibg4aihsq4gMtORkM4UbH8d7ZNkjN8nUqyiMq5jTuQsDRf3pTRUQER2ezAESsfqwGvWxYGAyGaZLEyNeAr197fVeE1I8ypa6deNdHoYymdBkYmb1udXlzDcsTSZvvPqqhl2XWKJGno7XbOO0nWgXzEPnShgy8/+8/cMLf5giwvmLv/jf/vqv/dq5c+dMvJ5j/uDw0PbeO4SNXSNY2hRvLICIPvu5z81mMxFZXl4upUzizq58te0Eev2YTEv03OocnD0U8KqeKN1R4Nq1a9euX0e3ZSOAlemUmK9cOHdpc31jdRnAwfHJKMo8LE+Xn/uNvx2hIF9V9Su8QKDXvvuSKVJVXV4aAIy1Wi8+LDhs5Mjl9/3hf9wvtuRit1IQsdXVMXmjfMhfSVGYw/eYC3OxGWDjOI51tMkbAIqHm7iP5xseWk97HwntUJquF5N3EbPds0rF18olyxBN1tiVUWrjBJKfjVnsHuG0mX2zdrY24auUYSg+pIcpW7GY6IknnminCnBhTx374TfeBaARWUOoILi7QlY7QpGIYoqLdYMQzOm3yZLidfDk62R+9vf/o8encyxc16LVOnhUV5Ym02GwZbx59XU7mxITp1M7qMr1F79heN8/ODo8PUXABzBdmixNytXXX03PwtkvJDu5AgTbsDFmjRkmtnIupQxDGQa3c9jz3iZ3XRawIzkigrj6xhu/9qu/duHChel0OplMrly5sjGfU3jeFMyZlnyYV57V7A0DMwR+8k/95LPPPru2tjaZTNbX19fX11948UWJ+d5GPnb6fvTmxdk6vaiwmVt+Ynmbiome5NUwRq9du3716tVwivX1V15W6Ob6mtHDxsoUwMHRCSlUq8g4nQxliNZmBRNPJpMhLnN65aXn7Q+TwlLVqi/cZGFmS6IR5mN99KnPNInjUtFvrORcNvttH06lEv62KqI00LijjjVvMnFNxlwKD3F7KwCztGGXp/jtf+waObnJ+u1cSTGH/cppBNjn3fMLWjA3k2hII8jiWtnZzzbja9HXcQszpsYiK87ITZvHHn0U8EqDa1evVfFRYCZqSmHTBxK3VzDzo48/Os7H+WymCpMSRAQm792jBt7gNuHC/OabbyLG3D762GPXrl2zjz74yOP7N9+tPvpVRYSZ8pLQ6WQ4nc8BevP1V/FHfDySV6i7fUHAEDwoB8cn953fFJUr51YvbazwMEwm0+PT2XQyzI4OPvX5L0D6kTv3XuoZrOYrSUojTwCCFZYbVo0DbDL+L/3yL21ubi4tLVG0cAzAqDrp1C9FeCMVQVrXiKqsQmQ9D3/1//Z/X3r66ccee0xEhmFYXV19+aWXnnnmGQDMVHhhqbbOTz755NWrV3NYsceViAhq9688/thjV69dBRoGAqul94WuX7/+6COPvvryi0S0Ml1amgwiY+eL4P3b20984gEAN9+5XqWa3tcMsgZWzX4GsLm2sr46NQ0TXjuJql0ff/njjzZZH0sl+LhRW6oNTCeiYRjcHLY+hcjLcmEizGbzMCIJPqBvkQDYOgTOLtVOJ9nKPh9cZRdWe6y3BlbtirpMDlsfiKraTYhMPCBgeD6pUyYgH4sk3uCiNl3DJSFgJneIJaniIW/T+I8/8fgbr7+BvJyCYpKfqipM5AF0/er1j3/8IaDNmFNVRM+WWR5EDMTYPtV+qRSWmEXRoAqitUsPHN65UWeiVawWTMUeWtZXp9ODQY+g0DffeM0e89jjj1194w1Q3qNBgN58+/owmZzO5iez+a2dvUvn1wPnuHWiw/kHNlYu/vrf+9qv/72vIeyl/t90ChC+EBGdzmYUvyZXOCkAf+6f+3NmblqmoTCpYqwjEz/yyCMvvfTSO2+/c//992d44oF33+XwTbj14TjD+K/BEElg5pcyQKofe//9tfvuu/rQQyZBLl26VIYhJH3pCQDhAUk
1/RwOYIyXgMIKdd54443HH3/cimpJYSNAzPmkmP6pqgC99fZbN2/eBOjc2qrNroFgY23lg50DIto5OKxjFdVrr7z4B4nh1QvQJACAgFdfftEkZipSqcKluK9cK4ahlPJ7//Aft4iizaWwURhpOlieRsFSRWC1UxQJJQAogzXQkhYtXMVYyL5bEcETl9GIu2Et59rcJYpWPPJyzWoz2CmLQkIYQlXEugNrFUpij6UCXKuIXa3SBpG7+jG2cW9NLBTOpGlfEpCnYtCinANp6ytU9eFHH7l+7TrFSRsZUBhdGi7iG29cfeSRh4dhwj7cQzliHRkFsbQXd41UIHriiSeuX7tGOclNfebTF37vH/iFl74x4aJD2D9MAKtUKmVzffXmzr594803Xnv0iaeg+sijj129ejUDArfefXNvZwug+Vjfv3kbRLd39r/85MeNMbZOiWiYLA+m889wY//v3b+enJzc86/b29v85/85KEQ1Jm7CY5sEAP/pf/qf2iVGk8nk9PT0qZ2dxw8PXyWaXr6sW1uSN02G+EiOTSOGgoFF1SptHr18+ZOnpyu3b790+bIph9WVFY+UGAd0ITfhmNwGVz5Zfa9++r4pBUjx2KOuioMkgwai1YxAb77xGhGWp0u11lrnVca1ZSspodPTkzfevLY8mUD17Tde/cQTn9LQYMER+J5xLxSE8xvry8vLvk2LaYpnYOfzirjFRn2eBomIj57UUBOgXGrtWtmzHsNV3TDIfO5fI9ZoQlZvjgkVsLjUjrncwyLnrjbjKcsooGCOKeg+wdFMBTie4zlchqEMJUNeUX3ltVq11rgjW3PMiQdmxCewuwe7cBeGRiE0P/744+REACJESMb1kohUqQp96623zDEISdMG31lOTES4lGEYLJJr7i4TPf74476mBlYVmJ3OiGngwdxNd3mGAYqh8OaaFy1cf+1VhV69du369WsU8kJVb719tTAXpv3jU6kVqhc31qpqrfWou33yI15nokH5WllZWVlZ6bk3X9/8xjfNRCGQiozjKHHZKhG9/PLLa2trliH/zNbW+u6uEK0w8507W0Yo/dNsI6GfJSjGIhxGZQXYG8fC/JWDg8/fuWNM9frrrycNxD661hSNEYhdNDJJwuEGzanqY4899vgTT5DHCN2098gGiIjevPo6gMkwQFWriojUcX15SVU+2NodiKalTCfDte+9mIGSEmRAzK+9/CIAVUyXJs0bsXingoDChUp54NEnHn7y06Zv1PpVRcBe15CVk+nYp9rwaAu59euSy6RrGNFcCqIJ16OGptBEyK5fLsWfHFznSItEgaWMU/gFJYOsS2/x+u9MWBgGBkMrd2VMxmpuJCBYnSynkHm86Ga2UKBJE0BrJZu+F/6Yqj7+2GNXr10z59xiyxQhaHJdQaUMb7311hOPPyHMDPUeKdFr164ZqqJtf+Em0SBWj8gFlgnAp7/6+9964etcCpE1bBS7vJwIPvAZpNA333gV+Am4M+KahJluv/umwTmZzY9nYxkKYFUZXiv+fV9nmPPMmysrK8fHZ9uSrOWlD+SaoyLgl158YXl5eTqdzmazjf39Hzo6+qvLy/ffuvWl++7D9vZweGjJoRpXLVIcOYXTy8F8aw8/fG0+P/fee1X1+QsXHl5e/ty/8C986l//11+4eJHc/WtDgtWcebsmAYBX6Gk46W4laeNmBUikqmgZCoMBfeKJJwBcvXo1Q5LpHBLT8tJkOhnMTzISX1+ZHp7OVXWsIhAo3X7rDc/cdFhlou+9/IL9fH59dW156ocfZoulOVX15HRmXoldlFhFlEACYVHYZfQ6lIEiOabQwqzeA9uKiLKCQkqVqiqK4mEjzVKlSLz5zyZS2s15Qb6ECNYaWs3qbsOtFJTljyBApYLIrEiyziIUZNYxeMO9TBUGMbH9LiJSXSH4XqzuUapRiqhArHBMFTqOo9Rq2DZp++QTT5CldjRDO3EW2lZw/c3rFplloDAvTSaTycTrV0wcLK4WIbCIcrVuEZzO57VWK1MxNy4DuufWVi+es8up8ebV1yycmKu1WOqd99+yAPxsPpqsfOyBS3a+2/XDmn/u8bonG9/ztb29/a1vf8tOfRxHT3dH4v2v/bW/ZrIcwKd2dvZL+cfH8QHVSSm/5+Mff2g6XWUmYAKvXnBrGYBxL0BE59fWCDh6663L778/AEz06Hvv/Xdvv3373/q31lZWnt3ZoYiNBbG5xjPc+h3SY43rmk0lewFst1ozt1Brnc/nIk4Jjz/xeIhg5Ekx0fJ0yXOoJle5INy3D7YPpktLy0uTG2++Pp1Oh2GgmGps4uR7L72QCJwOhfNWMTdjFSqnp6c/9Id+wlVdrXFHn4eCtPo9O2Mdc7VMEU23Sg+FWhJBhAiTyTAZlohoHEcLRBfmyWRieJ4Mw3RpaWlpyUShLuRMk8tcVQxlQIa1a61j7YqMI+FMgLUG+GqhqmMdx/koWt1ot2qSWus41nGsnnUIkE6L6C0rQ4GzH4HEa7XJrA4LjzOzORtV5JFHHpHI3XHMNOhrGMzTuHb1qhFN4XLt+jVAufAwlGEYwgiBEYDvuVYTuGdW+4Xf+2PHs7mIyDhqrVXqKKMXspZheToNC4jeuvo6+9WHBRa3HI/LZFom0xG8c3ga3v6ZSyT/wXn1Q5+AhuukG8Nt4bK8vGwbvG/uN5K8fOHCycnJzsHBk+fOXZlMNuPC2AGYABPm86ur48WLcvmyXr68xHx8fDwlWmIugaido6NHt7ZeuH799ffeK889J1HKW0XGcZzPx74uINFLSC/IV+sBLiiBzIB0D9EsWjZ3hB997NGHH3k41cDS0uTxT35qZbo0GQZVqXWcz2cy1vvOr5kqIab9k9nxfBTgrde/y8yTpYlRQuGS3Lu8NLHsMQheZGlOfhmoFFE88slPt9UC6OnWbIGF1bLTp2KMHgaRnLZDNh/LqHc+n0sVO7CGo0izTZYmZSiZBnLBBVTLI49VVIlLeklBAx0dx2rtfV1cLWL2V+yHokOZCPCK7Z5APQCmHvKbhH0fzq1zdxSdQBVjrePouexHHnusp3ErfgYwDNZT4X+8/uabGUTlyK1GaAwwqaZdcxaAbi5+rvaZ3/ePxH2kgO8foiIylsLn190Rfevq67FalWiNmE6n0+Xl6XQ5Rhzp5vqy4eBEfmea79ITttfOzg7ClweoMFum0Kjh5ZdfPj4+ZublnZ211dVzn/kMgI+JXL1z57Vbt67v7anqqDphnjJPmGllhYGT4+NzOzuP1rp2586V1dUloiGikPZ/c9XLy8s6jrf39i5sbKxsb7/88suiOoTV474JgcvC1ACEwIqCGruOZIhE+IKVhFBEqhjH+vAjjzz22GP2cItpeCetmUrQGlro+GS+srx8bmPj4ua5t9/4XlZLuPp9+YWMGU0GhtukQqChDMN0mZmljlc+8Viu1k1hInOkuUR6p/9AlO0Cfk0Z7PrLrEJVENFkMrE3e58uHgLAhFxmwsO5U08lRj65Al4cBiQHmFfq8QvjNfelswLOsCrKXpto1ZcUhQT+Z4kG/TgGe1
opntMnYvZb26wYbaHDI2wAKwew5T362GPm5VepVkHK4Y43g1rVorue0s7YpkdTIjAYEjNiC81+NuhHJzMEgxcuzIPFvxVYGgb1WCAsmRShBCGib//a3zIC3D86OT6dqeoTn/z0vFZVPf5w7r2Hc/4hrw/T1d/+9reb9VL8NrzC/NKLLw7DsLa2RnEz88naGoAyeC/KTMRSk/ZcBpZms2XmJeYJ8/bu7sC8f3ycf7V/7dd5rRfW1u5fW5vN50y0tLRk4jgr7y0vQvAgSKLXduJMVaIIi4iZ3nzzLWa/N0zSqm5BXSKixx5//Nza8ief+vT5tVWpc6kjVAuBiWqVT33281A5PJ0vTZbMk9pcX3PpwN5v8r0XX7DT3VxfngyDIIw/m2QqMs5mUP2hP/gTWRpksAO3Vtzqv+Au+gFS6UZszi6slgrjT2YEnSeDeJlqmKLMObapvbhwOESUHybqW+S9Jt/WXKJ2IzKIzseqOlgUwn5h5rx0xOLD4nfzIlBfqOuKNmqEe6c+aMpWnH6/x7ZBCrV60ycef/zqtatey6IaFhFLV0tEwPXr12mRKyKyHcaGW93+Jy8qjjkhIHz+h3/s9W/95sryxNIaXm6qUGAy8IWNtZ39Q0DfuvZ6mF2+4KOjozqfK3Drzu4PffYphT751JMfvH2tAjVmy34YW575k3Yp349+E8DNt6/+4v/3L8QfJGKUuLFzYF9ZXl6uqifHx/qNb9w/mTx3/foSPAmcgxnyuXZS80gi3C1fFDg/DE99/OPL02kV2dnbM6L56//vfzOXZyS0a2OAFg8jRWtTGwAIm2srBFz7rRVqfk3mKQlRlqDAdDKMtd53cUNV4xIDBTAf5XQ8+n1f/Mze4cmNrZ3HPnYF0N/6W//VF37kj1LeJwi88tLzgEJJlSZ2dwcxVKrKOBsJWohHxcNPfEojqGs9fZlr8POyqm8oF47mMf/XWNQjdVHbmx9g7/Kf+4WDQcDVMtVuoXjEFNTf6EEgDKVIULtrLvigG4WSGbJs3RntOFqAkkGgYTKZSFOzPv/SE9hxY6LJWYtR0GJTYVKkiNjn8nTZrk2xnJuqK2eFiDz88MPXrl41j8uz1VZTCcAGRLhBRWnYuOcMN8wk4igRIg+V0MIFDOgDn3h099a7xp3VY12WZZeV6dLy0uRkNt/a3X/9le9+8tNPg0ihW++9def9t4loNptf3FjdWJkYa5kaOqAVd00/RIt+xJ8++rW9vb1+5fzBrffaWz48gY62DwBMp1MR4QcfXL99ew7cHEe5cAEffGAnMmE+CcoOKkEB5h1L+1OJKBL4j547d3tj42PzOZh5Mjm6eJFu3Trd3Uq/1oWa6q3InOPsAympxP7dBUBkAzT9bTPzjAGSORaX2txpUJnQ8mQKYJyPN7Z2LRI5lCIqpE5jLz3/bRCp6PJ02NxYZe/Cq+qNQQIuYL54/8PdOkmluv/Y11fEAF2jYSNT6qbGIgLbYeYjCuycOM3SThNSISRgZqtsibVJ8bswiWNkvNk1ICIwcWUl2EBIt6ZIpVan/AU2ts+IXW7WWQvuKCdyXamSz3N3doKb+a3IICx+l0+LY2xUU0C0A3vkkUdee+11IhK/+sTFU0ZB3ZBoEVHPOoQyaZ5FGI/pvYczT7Rx30MHWzdGrVrFwrKigQCpy5Ph5HQO4LXvvfLkpz4Ty83/6K3d/c31xZxt5y/d83XmTx+hq+9+vX1rZ2l54uGK1GhMw3DKEThR1VcuX/59s9nNWjcODmbAcikT4Ngu5gr09v/m/+WvDywtFWBrHF/72Mc+f3r6jXPnHprP3z88VNVhGPbm8sCFdeNKyxwC+sjq6nu37szG0bKcYXnBg1f5P8Mc4e0bp488cImoFeciCIZhY2Wpnes9ooOqQK1y5fzG4fFsdXkyFP6tv/U3fugP/eOmo1554fkMna0vL1vW02QEM4EKiAn8Iz/+v+ywnRoUWSNhj7AWQIuqGNo9u9GElaq2AY8AiBiEUngc62w2m6hyKa5kNMxmhUCkSj5H1W5hcDJw6iUQiFFQyHyOWK0rJQ3jJJSvx4uZeehZi+KCJmndv/FxswfialN1Fmn7S2cdgNdpGbx4mrG91XHUsc7n4yc+8Yn33nuPDDARkU95pQjHmaucYiXNp7bgoKBmDnWUS8CVhx555+VvDJOhks0PUYE/azIMyaqvv/pd1X/Cfn31W79phsre0enOwdHhyawnq9OV83eR2u/Aa3t72364tb2PptR8JPXu/lGG3Espr+ztfebk5L4LF14HiGidecL8ifPnX9/a2p/NklcVmBCdtnybvwbg2U984v0bN5aG4RuqjzGvjuPG9vavPvggAGY+ntf3b24jig5ifrCWUg73jwL5IUg1hwc34jLK++BOXO/WhHx3wBylZt0nYp2aI0quv3/7R579zGRpqsDS0hKxJTXqd1/8TjjAK+aUQmGhII17s07ns/lYkyo0OrI98RHVvBl1SrlPgPXomqLT2C/CejBTTpiYi93ZWoaB1O64sSibK1siUlFLuIbxB42xs018+RrdCTWuTtRkpSO1uLIVlhH3eScRGcdxHEd7LDNHNIXyvGyjZstymOTkc+38/zXaXKva/RsqImYbW5jXUnmTyeSTn/zkMAzsY7KRy2g+bRVrQ5XF1tn8ZLXWsugZ4OhpM7v9ykMPn87miBuiLKpgtpDVY9mZvP697yVNUryOT2dHJ7Neg9UYKfoP+brbHc3XjTt7JtqhgFZDim18b2/PL3x76qnnzp+vtT62vHzfZLJEtLK0dOv42Kpo0NQdCvP55WU4v/j7y8y3T08L0QbwI++9JyL3Hx9/Y3Pz5OJFVZ3NZof7+6lLVdXZw4oHsm0Lgc3CRIwI4zuV90ofruagKqJVtFYZq4xjHeNYNbI65iom956czgGazecqApWbb71hbpNU+d5LLxi9rS0veeUnyOZRlVKGMgHw8cc/9fATn6Lo5xkKc4kG+M7Ei1ip7bK3QF1FZxcEKC4tAcxbjhJGS5R5wMpRIZ4Vt2eNc8sdVR+irt5c3WjeMiBBq0xWRcjkN19RVOlZdb+rS3OItIpE2t1nc5M7tGRJvzKUoZTJZLB0QlPNaHutIuM4n1sFhc9nJwINw2DtaLY5qX7pm3VRPfbYY+FkeBUUEawSDUCKHMoGbs9RqbVoeVlc0IrTFryfcijl01/9kXi//wygWF2e7h8emzH36isvG4J2PnjXBMHpvAJ49/a2hpL/QcLMP9CHPuLDCvh1Xs3MsOjvbDbLVvLrjzzyzfX145OTKVCIlqfT68zDAw88eumSTaIBsFvrlY2Np+67bzvueTy3vPyJCxdOPv3pX1Y9t7nJzMN8fuPGjW+dnl5/6CETjsvLy6oa7TBqyLTdf7C9i9ADmQUIwWjzZUoOA1bNJ+AsP7ed+iCHWmWsdaz1gzu7N7f3NUI7BuGtG1v23XevvmqE/vorLxlVLC9N+P/f3r81W5Zl52HYN8aca+1z8lJdaHR3oS+Vt6rqRjeaDVKUTTREUSBBkIRIQjAthiCJ9IsjGH6xHvQDzAeGwy9+d
oTfLTuosBU2YFISKNMXigwaIkiTxK27ujIrqy/oW3XdMvOcvdacY/jhG2OulSezsrKqu0CbwupC4px99l57zcuY4/aNb2RmQRSRH6mTVIXo2RJd2nxjT2d+rkR5pAgSGj2eK3apWetra91658EDR9Eyz/NUS0StzN1cVJBV/nTyhybQwYMTu12G3S4iDN/uBXDAOMazUPpYwzsFxCle5zsqwmPcSlWRt2Bbp+F9pSCFM0+9GFSa7p4I9aJlM1E2DxZmPUE6zmI0EXFHd7t+/drdV+9qJoQARHXJrqQmE3ab8cyzO7rRpKHTsy2LZsXSsjZRVYOj27YzwiSaal37AuD2y1954aXPvfHdb3HvnS/r937wFrJSnM90nC6/l0i+v2vMzzChMxgeHj6tmY89c6m176nquq5MP6rq73ziE9cfPHjp7l0FlnX9t65e/cEzz/zDWjFN/+bbb7/29ttXv//9k3me5/nTly715547e/bZ22+//c7h8GIpf2Wajsvy7OXL5+t6+8aNr37qU8k35efn51j6Ry+zC7MLG4WERwIHYFFfl4dlxCAcnlBUEfGPfeTK0MljR46MRly74+v11LrhTIsDeOfBGVW/o9RanrlSfv/Oy5+6+dJXfvu3+NmTebpy6VBUBNEtUUoFe32pfvkX/nIUBtpmu8V+3o2J3/jY/awafUYt3Ncwx7fjSVC0LGtf1zVcaBW4D+oskeTLyR/2x3YY66riTm9RRYYMPvaZw/QG+Mx1ZzuAjSPoKQwmgTH1u9gVw30DSuEAavbg3pYrrKm4G9LHbmszMw2sqSJR73fv3n3YWcubxIHzsAoViAngeYZtnvD+mQH89M/+3H/xT//R5dNTrVW6d2s8I37w1j0H5qk+OC60on/hL8oPvnV3XVc3e/veA3Y5//brb/2nf+t/bd2WZfn1X//17373u48VRaRr9G5/fcrrU1/68t/4G39jOF6dGFXgV3/pl9Z1PR6Ph8NBUwG+dvPmW88++ydffvnHf/zHtdZngEMpz92/f7eUb/zYj33swYMrp6du9vu1XvnIR5bj8d8/Pf39ef7ksuDqVev99Tfe+I1nn/32xz425V6hnv8f/smf/+t//X+ClCVza62LSM0ompndvn3bc90zEjG2RwCdb9y4Mdar7GKTtEI//fEfEy2nV58FVliHG9xg/jtfu3t+vpyentZp+p/9yi+J6Nv3Hrzx5htXLl8qWr77jTufuvVSJJCAwzyB8SQQ4y7o3cxEpXW79uLnyCbhuiWf9rmPsd24b/b7WbN0Pl7P2uYBV3IPF7d7F5He+7Is8+EgLlRvIoMAaxxzW7iAh5SE9xleW5duLB5U8a0TewS9Uzy52/KZ+ShwZzKMp+YYrewaFg8rUkRKqRs0JMmQxpe5bRWMlLswOeDE6AGY51lF0vLw1vtnnn+edFs8LodBEVo7D71hALt5wuK3VKdntHyTJccXf+ZPdWvWmnsHxAWvv32fnsQzhOABr7z8e3D/9t1XluNxXZYHZ8cHZ0eHv/C5z7l5683dv7Pjf3z0eqz0PtmiHmAswrB2H0u7K5kb/qP/+D+mANMs4gIdj8dvzfP/9lOf+q++853vvfXWejx+9Xj86vH4fO//0bL88cOBJ/oN4N88O/szqgC+1pqZvXPv3v/9m9/8z+7d+39P09e//vXvfOc7r7/+OoNk5+fnb775VoZPYqr5/7rHmb5p1L1jknGegBqWsDNzG4T1SCvsUIvDNartCrSw3nNd1sjTuMP9s1/4osPfeXB2PDu7/87bb7/1xp3f+efuniBKv3p6mErhbjCGDayzlOKjn7pOt87efQn49BbFPZzpMGvD/N12ked/ACC6GcAUYx5/w342s2Vdl3VdewsSOXhrLQI27q0Hocduw4Y3zj+MGR4H3/bMA7NQam29RWlJc4ONJUkjXofQEMdcHr4jlw0j1JnR4JFF2qxewMwHdlLTVtl/3bXnr929e3cj4Qkr2iPEjYipRDjfI66fuNC4hiEgWd51dmzKZk4OiFD3AlDVk5PDPNVl7YD8+t/51Tduv8wsxfffeieOyDjR/Am69wnXB9PJnkbX+Lj1vq7r+fn5gwcPCAM8Pz+/c+fO1atXX3/99b93+fIb3/3u9d5f+vjH1ez76zrXejB7+623rjzzzHOnpxPw9ttvn6/rR958878DfqPW7n7nmWf0wYNlWV5//fWPfOQjly5dEpF1XWutr9654+43btzo3pnzsG7eo6R2WEOSIR8drh22h1YR7HVXmuKRX03aqd2wHUAj14e7A5/9wh/56u/8trv/4N6Da5c+CpFvfO33fvdf/nN+7zzVLdgGKHKJAbj9zM//u+5uzfY7RGTLs2yyAQdcd2iTweNZcjnGQnTrpRRl9a9qz7zMwHLtRADubt0QNWyFPPJhM9KuHOu7S7WauZEvWsY8A2k78GJuTyG1tQ73gbD0XRiW/q6ZkSbXfTdIwN2z97G4gwnoh+xnoScz5iiKQkmwJbIxv+Rz22uvEYIXjCFlTyuRz+CJWA5Ilj/usd0t2+QC+NLP/txrv/Ubh3mC4btvvIk4dzg1mgLs3/vGnUPRKoUWI42cP/+XfrkULX0zMd7X9WS72t1ZVzim7Z/85m/+T8l3sYeLuP+1v/bX/vbf/tvH45F8APM8P3jw4N69e/fu3ZumCcBv/PiP/wbw48vy2XfeqcAn1rUApdYrranIN7/73d89P//eNGGev3rlSnpwfv/+/VorUw8AGFxsrREVd+fOneevX6M5pEUVgTFwiQaryOTiQKJTOGTrFsBKL8iuU4QDz1w+8Wjpkv0jRVDKO2fLPM/LsmB093J393cenFvvEDmZ6//mf/U3eaePXDq5dJiMHX7C6A09v66dRGiqIgjAI2QkO8V3BDqMpYnuQAdUpwmuGo/NATZvJUPOTPxOqAjgcB/yOWxC0gpA2F3IzKzUGljowWEJybypwR9KMlE3Y0TOU4zd3NSrsweEqCGPpWGmg55FuLLDJXDAzda2Lss6TQQebHWMkIiwl31WNjFC4xTxdI83qXd399aiOEnSjopze1AFxJwg9TP2jx1fKEKsVqBzRZ597tPvfO9bb7xzBlH6SjyzPnL55PtvTvdwBHDnay//9C1SkOOte2ehgRHb6P/zz//5DyOrT37PZkKPs2lLbcDdW29/5X/8V37tV3/t7OxMRN566y3aqOu6Uvbo1P3gcPjHhwPy8OakxdRdvoz0YFtrInLv3j26Kqenp8ww379/P7wnZxzcX71zx7pdu3ZtKLFuJpDr12+8evcuN4ao0nrUPEDHY5vZcTnO8yHEICNGgJu7Fh29EwEBrPXR98Pc/KXP/1ROmh57P5mqDzfbAcFUo7QzZox0jYaPPX/92ks/aUF3IZ6PHUgOPnZRHi5Z4oYhOea2rMs0zUOQMrRGavVQ6SKiKmaiqkQ0rq2XUpFm8djbbHQWAVEBejeCNOWhUnxuakseYmfLBDq6kgVPKuQ0IrxKa61aylBxqhp8B7WWYHYJasX0Y9F6X1uD4zDPo9wnTriMVURwWMIc5GBI9knV
bXnSlKgWLAyDJWJbY7nhI2bD5RdI0TKR6jFPKR1Fa8GhHCmy4Yl95LnPvP7WO9abR7zE07WTqERzvHn/jBm/tx+cZws73Prs50bm+cnC+Z7S+24XPWH++8/+2T/jY7O+3EeqzP2P/vQfXZZlXdd1XR88eODuzz//PPGVkjWltInG01qar74jNgFwdnZ2//79/ZNfunTpwYMHpJhFaIwwibWUb37zm5K8tsgEz5htTUSA5is+egs4DvNhdxbFEXZyOKhKnSaMLQJAy/kxOg91s3Vdb332Jx1w8++9+U7RWmqtdTpbGqXtyumJJ0kPhscOkSI3Pvv5ojrRU2NwOINJlYQeqiXqIoh3qJqmaTeDY57mkURJfZN9QoZ6dMCjYC4k1oxxihF22u3t3J9xyvTYhOOwFhIJRpFfWNhpY5YscSkSXVoorQnkAGjcztMUVTCcbgupRh6oWyvg9B/okcc9BEjWpd47k9et99YaM13mnry8AwbA5ZVXgzE4bG4OjGy9SbSpwxnmY3ieVUm1Q+8ZI9KA1PCf+Mw1LXUYcnDPNhJ+mKerl054sH/9+2/VqR7Xfn5cHXjhs5+zjKXtI1iPFeYnS/gT/jpySCFOOjKvPqwsFf3Sl770K7/yK8fj8Xg8rutKQo9Pf/rTly9fHsg53RHxj2/0CJ/GObgXXQBXrlz5+Mc//slPfvL8/Pz8/HzELxOhoBTI26+8cvv2bYSu0zqNWrxIo27Kylk++9CBTiXWe4ejVraPL4C89eabb73x5ls/eBOtt/Nj2HEC5DN/9vM/RWPNRab5cG8Jk/sw1akWHksAtBSocsTL2j5987MRZMk3cA+NGi/ZBaqQobUxaQN9EI9tyW/q0YWQe36h/UOSAPeSZCYjDiBRNpdgjExhcsIo+sjAQdnmOxp6IszMLGvK5Lvu1HvUfEqqPkSqj/CniDqas6FElCFncDirDTNWbMk0b+5EHXSztTXKQG+NxiHPP0qXte6JsaJ7MGYyE/3GXZy7Ob53HI0stkqL38ebsZ04vff+sU9eA0TzCINQQyvICMEPxxTr+doAgfutFz/r7r33b3/7208QzsdeFyT2gn5+VJ4Hw85IWvbeSCpixoYa9h/8yn+wruuyLMfj8ezsjM7ztWvXPvWpT9Vah+xd+Dou/f289l/6iU984vr165/4xCfeeecd3rm19uKLL44JzLVw7pq7d+8qbavUmruT4qEtYWavvHLbx1YxDkLYCKi3jqKALcfle9//wRtv3XvzzbeX4/lxWT3TjdyWn/3CFwET4LXvvA73O9/6PsMDH7l8OikPAo34i5lbd+s3Xvzc9Zd+klsBGf8eDlfY8JHsIj6XcL7eessdbDTyezy2uzsfu2e0mfTxrfe1raNhBcjXM3Dp7tZtkOyEcIwdTrkKUorc86nb+XHwNHHimsaiBMCx91aPyyKCoiSwDAIaG3fEQydrbgjZXJk4cG14BZGcBnprjFhSspdlMfdpmtZlYQAqQsnB6OfLskRoJALIcRanQfBQoJtPw9PIRNBi5Daa3MST+0hBff6Pf/n3/uk/jjr1TkYjEcjJNF25dELMwFv3zpbWXn/rHo/MGy+8yG09Cj4fldLhZz72DRfe+djXL7zyj//xP/5jf+yP7Z+cwQdupL/5N//m3/pbfytctczcTtN0/fr18/Pzt99++3g8LstSSmG4az9p45rn+ZlnnnnmmWc++tGPMrLNHFXvnXGy8/PzEUMaH2du4+WXXxaRmzdvHo9H7Jpa+iDTzCcHsCwLEosPZzjFivv5+fk0FRzP7927D0dbl96mt87PGUWjuqBrEOhREdA53DA7mKrGDgf4Kif4nftny/Ho6UQgq9RLKb21sQpD9+Tt95S3gTXYPXnohmFTcHt15p7z0KGWOjdjNAuZYfI08i09vrzB9mwljE/OOcsrQwRKKe1dnrxG9pvB5I6dYOaei6cfBShibD8IOIGyQ5gQ1oAKvY4ILFH9ttaS1cF3tYMRLn7ttdd4m9GXlVNXohTLM5m1i0gnnhsqvpVx5l6Vh5i1Yx6t09Jwd+vNDe7ugqun8+8DAN5458H9B+e5Q4LZz3r/l/9y4176kV8XksDu0ezdjBtjFwsFPveTn/ulf++Xfu1Xf43zUGv19H6nafroRz86TVNr7f79++5Ot1aSPoKM8IfDgXjJ1trbb78dmUkzKvY//af/9M2bNyk2ThrHh+Wf1bGvvPK1DOxgk1iXXNPYLT3h2VR43HZ6QGutAA/un1nvJL5+cHb0oQ+SzMzMfvGX/+r/9f/4fwBw78H5+bK+8c4ZgMNUnrl8IlCIS2E+UlKy7d/4U3+Oo8bYLRl51QxB50zHASWidKlstGjL3Yvc55uuEsFeXw1H1V2BAHhbHfG5lP0I8jGiBuxUIECH4cI2QC66+4ZcvDDn9fTSKeM1DjaMKp5hD+xGOaRFIh62/alvXVGAOLpkxPQFwnOdpLDzPItqFqnF+wUgo++2hEwAbHQbkY+K01EzwIJU1BnNS4GNEQ6rUkT+2J/8+d/97/5bcXZYHXJNs0qunJ7cOzuHyN3v/uDsuAIQ4Kf+yE+31vCwb7lXpO+mVPevP/lXXnuSyn/4D//hT/zET8Ssx4TLLumGT3/q02wzzdk4PT0dHtE4qk9OTlT1ypUr7qNFJUSEyu3s7GwYupQTSu9nP/e5//l/8p+8dvfuQw8MYCAkBxE03H1jFB/zPLQxv+/00qVx9EiaSKeXT07muV46tW5XrqzHZaUHZ26XL18G0Fortbo7OzZRrd87O77x9gMKlQhOTw5QwATZ3ZZb5bj2l37qS7vzJrtlj/SEu7OvALsFZHuUC0+eFlxsLyqDJ2x1fqK3Vo/HbjZNE3OlvtuRskuh51a3bsZAj6Zi3/bJ2GCBqkUEcQs5Bs3d6zzN7r72XkRqGeouOqDSCkHWf/IAY3CYjw6RMkzZcJZyNOlqAxDVQvQVtd9I2wLRHa6yiQT25/3wz4c1vLGfpB3Sezc4uaKdTLiS8HTxLQcIQOSLP/Pv/PZv/AMRUS1JnigicjJPAxJA6QXw0k9+nt5jrfXb3/72exrJj16PldVH/3oxiCUSRFwWWAeuIX2/EVwZStLMSinzPLM1Wc5rkvs+/ADD4rWsT2TUdFmWf/+v/tVf+ZVfgeCFF14ws7t378ZmYtwxz1DesHWSOSlNm+FHbFs2jn1HMgRzqkspVy5dKqXU+dJbb/ygltKnELHefDyzubW1AYDKv/crf/3X/vP/TIDf+/q3KXXPXrnM2CZKTE9ypvnHP3MTRG4CzCOWrMunwiQyVFVrKeGWZ9kM/VTf77+RTlVVoNQ6hHBsHmahqD9YrXV+PMZsFPUeCTYOsmjZMMEOg9eMRXPGJNPI7l4zoCWpaVqWWyG4InvVom4+0QBxLOvKot9wZSViKvy1k50vJJanhUuAsQEA6SEEjsK9m63r0lobvTx5YpGSs5QiyURp6cFS9apK5vswdqqUIPFSSiy8SGVz1259XRsClJIP7+6ZkY8Uszsp6SUJjQQ4TPXyyeH+gyMEZ8coAH7xJ79gZsu6fut
b33w3aXyyiF7405PfPJTwt771rd/8zX/CsROkkcsdR/hA3Yy8ERPCAwlIfPIIW2bwMqSLcVOmoyjD9PNf//73/9t/8A9kGJnub775Jq07MLGEsHo++clP/sRP/ARhT+a2NoLbgzOGZy33jLmjt26R8FSVeaqAk3J1mud1XWW02Ea0GOEyRX+ZaKgZX54GJKZazSN6Mni7HPiTv/jL+8YrrTXUSsgtw7TSRWeNgJvbsq4X90yU7ckoyZLgoqf/LYimQ2EUOd0938AI43wskREs3a2ta9GC7ARs6VAIkvO86EMb3my1zqUkGJE5mDQTvPXeW6sBKHGxrKrNTM5otxMmxKY2B6gjj6g8jDJ+FYvo3b2v6/G4CED9YAkHr7Xu+Ye4wTWZ+EY6SEZ9pkgpStDM2EzsYWuD4kRkxGF1wDlp/wACfPFP/Knf/Sf/yIcDwXupArh66eSNtx+svQ1r9YWXPhcRwizRfDfZezcRfbLEvtv1+7//+//sn/6z2IIiSCN5vOHRc2Hgxmk/D/h6KYUblykZT8HYa+CeFdWvvfba22+/Pe65v/+Fn0XkU5/+lLjwcI/khQOSSjhjQr01tnvfjhBmybJER1Wrh0+oIqs7gG7dfKNVe+kLXxwDd/fDPF05PcBdg0QWKpBSACzHlRtxmP2lFof37hy+uZVa4HD13rtCS609yi09tJIKG4j13qUMgk1ua6Uhy9K9oeS4HpYBPxGYO4M+iSlUnQ/MwoqIEl6ys5bTReUhBXHXUjyY4V1Fzb2Kag0h773XUgCvPMMsLYc8sIFdzZTwCGIAbevcqMEgl3l8IC0O353D7sjcL5C5WYBVU+HYSYDS4s1Cdh4eye7b2QgDw92RGkEgDjNspaoPQfB4DwgR3YAWfe7569/9xt38XmFQgg89VVlbBgeBFz/3eZqp/+Jf/Iunlr4Pco0gFpXwBYsawNnZ2V6G9xI1Xhny6UmSuNe6+3diB/CgGD96t8f+vF3DjETEVF2L58xjF8VljA0kbFER0cunJyhlOjkFyjTPy9mZM48AVJUugTFmeQw/+7kv/JH4VoAa7srpASKW3y4SUcyPX7t187OfH3tCKHLBU2tAUejwQzkzVRW1uI3+exmPFVQZDz/OniyuTIGW3XOJwIPGfAqSiZCpON6iLDdkSmopVHJuZoYSfCUPP7x47wa1gsQy58OLSClVW2uZzmqtrYCTllPS/wHi+ykJmXSmzi+hvzLCIhAzX2msGlmY+zjBpqlmv/FNlYvIzZs36QdoluwP00TS8c54X2S7efb3VCMjmaa6gT3C/d89PBzPffq6JEE2tXBvzXq7enKgZcKozYuf+zy/fXNYHt7Kj9/ZT3ft/YKnfOceL41d0OjCNSznZNV/16949PVnn332yY80/vStb31r5Ht7zDzDSDoWCwAcd159NbAL4BaXIjrQe9NUzEPvqYi7tc4uZCDggj5CqXWIjLs/e/US1UNGkT0XLnI5fEhNtanR0VOoq1prwWpsFii3biII6pmtiD1DShFhRkYnwjOJKeG4IHQl3KHkDCjFnZlRyZbKsrtFSVwnSsCQNA0OyMPUq7WUzAoTwth7Sn030y2LNw5O99573xqpWIAzs0qIchu3HOoPgERnJxGx7M3V1uZu0SEFcYzpTpnwmW/cuMm1GAfbHhzjSU0yjtuWIAffBcTDcGcewgKrFXUb8fD4xGeuZw5gvwcA+OWTA/cZchLoYT4NiuPJ4v0EqbhYSAi8+eabj9WBj/ZSunA9aq7v0Dvy6F8ffcInPPaFnwcqExFxAaOkDGQMg25osHxCzHNl0DI7rlHziIMvjoMhwuO9d7j98n/41+MZHFdPT6aoQ+SScmiy9v7lP/eXQ4a3jWPdXICSqSwCBK0nXiqffzT9sSTWSF84thWSaitDWBhTQotSBLnr2PcwIhTIKO+j2x5Dhvi28dd8kQ9TmEAyj4hDZys2VREdgEShZbSjrqbo9tY9fbm9VebZLYZfTHudtX5Dj7XW3C2IqQcJS/JXx2cFvbflePzMZz7T0+Ru1KqBvrQYfZBvhG/JWbDdBHH4WU45VnCHT4I89/xNC7Ab14KLiKL67JXTqTKsKX/+L/+PeAw9Kr1PVptPKcnvqYQf+/qDBw8oxj/2Yz/2nk/ynjffHb/v47MAvv373x53kKRoysSIeXAPe0KwtvsUUXcvhRUIDnidKtPHlJxaNMLFATNksCn8HcAPcwnIVQQmFBDLmPf1Fz/n7oTutjx/Y5GVHZETIwRnwKxkyJq7xUejXrhnm44xIUPUWPmTMGxPrFXIdPauYZqqmdsmOY+clYwgrtl5jM5zS+QjOPKhkGlHpMoR2RN/BD9zBGn5OXZtmtlFAk52wu6dkUxJjU3kc+8tdF62x7Hezbxk41IfcMstLBQBplJrKeWFF17g/HIJuchIT3bMYhpveXzQbFN1Rw8QLiT4xso0TbXU4SLC/Utf/ndkb8ywLE6UpYX8zq999Xe3NXs/EvtDvu1pPvueqvgDPMCFYb6nBWFmLbo9MjyqIpJ1a5FWIRsBKwpEhYrB3Jo1oHGe61TNnfzHzrB2CkMpBY62Njf/yS9+ies/13o6VeIKrXfrzWEQF5WPX7sJgC60VqXJnhZYymyGALpZ61FOwwCNRJGtjM1T96UBIt0CH9atOXkzeEZxS/a+qSV3Eam1uvmyLL31NFGG2Ke5RK6gZD7g20qtZI2EYIey3C4za9b5/Bt1taX2iyVKjVtKKXUArYsDvXU6Np4JrvwOmlDeWuMPa1S6VU+jy/KgS2YJrpwTn2y9P//889bNg8+eYf04mXwA3zR6zAz7I20R2z2/JDSFjbXiX88MR5gSGTIQwUeuXJqnmlsW7t6t//P3qiJ8v9cFYXjUhEZmhh8rcnzx3aCdP6qnesKf+Ov1Gzc0z27PRpPxG8KmZFzjtddeKzVqaeZpsm5RagaHBaadGR0ezG6+rGuqytDM/+e//b/jt5/M4S4i+QBCPav+zJ/9S/R6mDiQlBG+NPwxMCXmKJKdzTACn7iwf7hPCuugSoUIaVIRmje48pKzJEyQkdtzwUjabU6HBy+NZ8IcGVEbz5/xF9k9f4wACStitXzSwLMVqkqKPg+mUkYVkxbqMVEppfK9JRuQj5vwUfg6a9NKRMNt1EOYGbIL3ggLMKfJiMi169e3u2GcTb7ZHo7h2mF8NWtfGISQOHpGRKeWKBmDyE9/+U/7htDabUvB1dMT3u32y18dCaQnb+Wn/PXR633p5Ceoxye8Xx4a3pO+nRGs9/yu8es3Xvv6zVu3aNdEDpdnvUbfQol+VArg7qt3v/711+ZpPpmriNbpEO6eap0ndxQJGrepFhGZSt0/+bquuR/k6qVTqnaUMvr6qei6tudvvmSDiSnbgGzhjq0ixuHIKCzrBJz6oNZk1WT0a1ceVGqZ5qmWwvRJ+J/Z8UsylSS58fj8RYuZEypDyczzxSTr2DXMU8EuKjj8jvDwU+15mBJgS0EVUdtWKHMu4dJ4Yc+qIT0e5PfxUQmjiFHgkbVD2mPrukJQ56loEmrumBAqC3f5fk
s4W9rk165dC4tANqDaCDXTOx31iSm/Ee5Mf9gRwBiuXw7CDMBzz98AIj2fHxI45qnOtTj89stfoZZ7Mg/W01xPlrfHauDx4rvJ0tWrV3/Ip3q36+lVsVl//tq169eu9XEaIsCz3CGexZ7c3nfu3OF+K1WQEjkVNesP4/O3a7Tdefl3fxvAYZ7meaq1FqIUAQg7d+pHP3UdKT/c65YXdS7XuPe+rOvY8623IfOITIVuIpA6lr5c7hJ4N9/xUgQLl6cNu7uIw2mt8VUHhLY6YR4apYa6D54/3GbNsgxgxOGXZWmZJuy96z5eN9aJz8FIYlSIMRqUzjQF1x3hiIpsZ35+DWNxNXik4+JPuptl330vGBAwM7Pnrz1vPU1ij3g6h2duEDBBFZ/MD49becQRg4jLs8iQH/nEp6/zfdxbQ7pPpjpNlbvr7/3dXxsRrCcL4VPq3v0B94S7Pc0bnv4j76l7n/CnJ7zz9p07TPxAcPPGDcuq8u2z5sMANiPk2F577e7Xv/7a1hrROt/XerQssoGcldBo7v7y7/4W7/nsldN5mrSUEaOhtvRu//Zf+CXZJ2UjFp5VscnGTp88QlZw1fhG2/D/u1i9u7u33kaR5IAGxR+Zz0sZQao6fpqJGhop7NguAFLrOrC25mPzhm8Y3ANDgFnkOIA6EJnmqSQoJZK4/GMJ7tmg7XFEq/jj8UgWebJ7IHz9GIaOzFhyrvNx13UVyDzNOjJnQCk6TdNhnh3onLL0pXtMt7WWlcetP3/9Ws9TKEcApNGiWhI6Hb4Ev8gJkObMuvfel2W1ETMXAfClL/9c+uOhtnnnk8M0T4+pW9gdQU/a2U8vAE/5hgvvebefP8C1H9Fjb/UeY8k4DeMI12/cYJBCMncVq+BRVct8hDumot959TbjuHyQuRaF1BKbaKqBnfTMP/3X/5f/ExCH+Mk8A8IApoiwhv64LmuP4rYeu8Vb7KTe1uSVIKvE8NQdKlprhYTS6mbWe+xloix6N4uw9rKsbW2J4Ev3VbKegUmSHVnFCCG5u3WyiCA4TFQO8zxNFe7GTIoqNE192yatt95aJ3FAVuN7942FoyKzaQ6mgsI2GDmrUoolFgu5rdP5eVjvxQxGSbSqllolT9NxWETcOZ1g37xYJ8EJAIMXUQFuXL9+9+5dqA6HIZYyo2wAyfWH1+yejr6MsF4p7ggYzV4uH96R7Fnz489ceePtMwfu3v7ais3Jf+yHnvIaH3/0PhdMaIKxmAre+4Hv9u1Pr9Wf5iEf+/Njf3WQaxGBnwNu3rwJ91fv3uV7ZJ/9iEyHAjg9mUXkO3fumNknb1yHlmVtCBYXEwSo3uNE3nQdgCunB+pegxEGC3e4XXvhczc++3nuP01OKQTzcyZyLUsuAGQPPUmzmVVt3EuWdIgBvMqpZyImadjDwBMJUH3qF54MGF/FOt91bbWu8zwjXLwwJMNN5IyJQEXdN0skyauHZD86BKXdCw8CQQ0eKY3sXimkOdpoeCSFJ7+VHvxmrwciF9M0lQ1YT/PBPC2N3hrJGXw0c6HvwWO41BqQrNCcYyuMoxSRPZexuRjU1yT92JJvxGdp0tUCAH76Z//0GAKdkdw3uHQyA/7q7VfeU2if3nB9v2948gf3//5Iro985CNP/saLD2C2LsvampmnHycO3Lh+nelfiJDlaJjEl05OapHDPI9gx3fuvtbWdVsl+s4qvXcKHk2321/9PQAn83T5ZHbv1laYU0ty45wtLXUy4i5pGNZS2cV7eJ5j6sxsXVbiIkSUITEOleYgEF6bqFActNQhExibP8V4DAQC1g9TA7M7BK3o/BSnMswKpzjQbOQQNG6p7E82hrDRV4U0sWKJIGSqcZVRGxl0eDzmjNA2c0tINyNMcIcYtCgc3TogvZuITPMM2bZa70YxcrcI5W1qM3aGO8Y88IMq+sKtW6/evcsnGlgOLjU/JZlYipMPXlQdLh5FUbEqLDpxsNz845+5LjIeIcpa3G2eorARgt/6rd96t239nrL9w7/h3ZSwuz/zzDNPyVO9O7If8+u7fe97/unrX3+NVEQDs8n9pEU//9nPmtvdV+86vNDNid7A/uC4fuf1N62bKGDobl//3g+e++SnS1G4QwWQKtBWUmXqna99lV/8Y89ckVKlVLfuvY+gpZTy5V/4y6GlIpC2qSvdVeRpdtvBKCEuhWeN5sxkvQ4tPgARrx5u2hA+vo+Osew4pUUBj/507i6QaZppzhPpVLS4h7VLXwOJwpC0LiFwccrRiIRj0FOms90FlU19BXvJ2ZZrO2Si74si28S4QwEPpk3A43/ZuIzMt+i9I/VqjjGqjtN7jeNkbW0a0cUAkYXOv3njxquvvoqdVRzHGGEDpXiMfg92C/PJOTQI1CnuKmrmz33mhm+EOME+6lFaOD84XwDcf/P79fTDivc+4RpJnSfI8JPv8G6nw97kfr+u70MHAYSFCkqixuBOCRHoXl586cVXX31VaNy5C2VG9bis7mOzwM1ee+2um9+4eVO4pQBVoWJ3sV//1f9ifP1hmgIgIIHmE/el2c3PfT63kw+FwT5S6SrCdo6biJS0C2TwYAgE2T4mC7lSpQJAJq9DGZs4WDOUh4W5CRQ2kpP8LtUCLWrd1mURzF5YzRrZVgGcZT3Oyk26pk7EVe99muexVBQ1jcSViEgk3AaqMMTCAUTNLcgMnYOkXCAbxpAj13fESMR/1UBEuEOyYd0IEhJi4RaoKTosRfIgHzPLbASdkRdefPHVO3eAqG7O05RfMr5qjIIOPSRbro0dg/DZ3R1f+vLP/dZv/IP0upAVNnjm8un33rz3WAH4kV/BY/7+rwuO8Xvq8wufvfDD0/y6v165ffv05GAEmevuq81HSuJzn/2sqL726m03Lutm35mP2jt3N0C++/vffO6Tn3IyOUNaD8wDF+Vknq5cOjE37wziOLibu3/8MzdVVNS79eTx3tkF7qCO1cEQs3W7z+2TzetlG4VkZSu4/yBmjnDlhIrWdrrEaZlyd5FRaJfZmep07MfW2jzPZh7ikIVRFAoIXGSD6EhU4GIQpIdMxCgoxrW7C2BAScWVmhzIcm2wihCiEC2V4S6mEJDsrInejr6hU51UdWyr47LM0zxUtWQWICUqjpiYHIAJ6HicHOStW7fuvHpHocNGIgEQRikmWC3Gb6B3EjdnmQsfSUUgxTFkWnjsScYMBbh8Mj84ru3s/r8SDby/3k0JP/q2H+Zbnn322afUvePXj37sowBit3iHlODv379Z9Nkf+2Pfv3uHQKKAFlln1gMp+g4cj8vXv/711vuN6zf4Si3lK7/zL772ld/hr5dOD+OmkW+Gm9mf+Pm/2KyD5iGCxmQ8MXcUqDaLYkcJkH+F7prsJi9flCgboDTaRFTE6AA6uKPMjK4pOSEZxIXAeqc64SgUriKjqRVzPYQl996UlrxHKFfIi/DoKAC4S9g72yiUMWHmagf/p2TBoDAUUTSgLTkd7m49TJKiOk1Vi0KktejcvTM9AMdUJ0v+2NTw7hlLoAoqiW7JCGGcipKrBsGtm7d4Q3dnBM6DZ
ySpPDg1bt06oie1iEhhc+dcN07Jl37m57a0wraoeObyKR72zz+8a852vvvrgjjtzd0PJqhP1s8XIBQXrOv3UssGMDlHtWPRahDDLbWPXb/uNOiKStFap8omoHBmYFTl2Bog7n73tbu16lSrFg0OdPdnr1wCkqA5KhBK0drNrr34WR7U4+DgHl7XVbLQghFNJHfadiBim1V6UiMG3ns3C7S/Z40Udx3358SUsoRxPiKmnla7CDiKWioTqCBDCL8ZcLMonqVcZBCbu94TcTwcwwgbMfifeSItqkWEMMew5pEpGgElIWN3Dri59d5YL4ZoKRxM5AB6ayoS5BvumU/vQ8Gq6oCr0drX4OlnFzWIUP3mjI/95CC4+vlr10iuG7G1jf+a0bxE0o3g1LZAJBaPi2VfH//0dZaY7e6jInLl9ITjbWfvvIdwfGjXu8mqu/+BIbGe8Ovv/u7vwlrIqnfKavwaaERDDxz/czdufvzaLWR8gkmY3no0qzc/Oy6OKCH8+tdfu/vaXYH86n/+v+dCTrVMpVhi3c299+bWP/aZWxG7jiM6Y5yqh7GpAGzu4TBZh8/nwV5OuL2HMvDhbQJtbWxP6VuNftws4BYpIBGBH8RykauCqs6HWUXI/mHdQpRa69bThI9iaR3qE5inqWQ+DEBC3baBVIRhpnH4pCcwpjhOk2y/QoCEimitcY/Mba/LGkjpjayQY1AHClm/MgpqGWTnBMes6/AO+AxB9QnAVQ3OfNStmzfvvPoq4D2QILL5EukHyG7DZe45u6TnZMPx3PM3v/et18TyU7yXyzNXTt+492DJ3vb/qq4n2M9PVsUfzNh+eukF8IMfvPmt77weM552mSdJC1/kjhfVeSqnJ4ePfeaFH3zjdvPu3Rx4+/7Z2w+OgLPXgTpKwJsBtDt37gCA4zDVw1TMRv1GZCXh/m//4i8PY22o0xGIAnsOijy6r4Cog2MeW/aoZsCCOC5GXUoZQCiP/EvcaIvzZpyM9yGuCRlQom4qtRLrXLOlk8OFbYIdQ2p46T4PzGdXCSHiZxlc7lmokTGjrNaNIyhQVu5EY0V2i3Q4VKIteZVG+Io2KxWupNyO6bCkbqrsG1M0nd5dxfC2azJM7aaycXncvHFjTAxV6wikSb5tC0tSsWY+IKBmDlH55PM3lI4F6wqR6yyYagHQzj7caNZjTeh9VvYJ1uz+esq3PeF6X9Lr7i9/9WUxJz2/mKH3+M/M17Wvq60rTU9xF4dCAXz0MzeEsAKRtx+c3z8/3js7vvHOfa5cuFbuKvLq1776ld/+lxS303ny6L4LRGi3rN3X3tMfc3gU32m0+o0GgppA6G1fScp3xJk841siAWkKg3wQhnnyNMa+SgVbskU2oi2opf0cahAYb49+yK2tiEBsCSAGjb3hWBLvFK2Vai2FiWjy6QxR4pFUATg8EU5sjTfy1OEu8tSRUmrKlzvJ+QDGwKLQ0Vh/S2J3HpgUHc8wElVf6FpBAtLLUJgxzw53X9a1JPjCzJC1j83M3a9du3b3tdc4DE+Vvs/FCyQZvMDg9XYo5UCeu3aDqTktaruCx2cunXzzhwsLPeX1qACzy9m76d7hjPwIn8Hdn3nmmf2vF/766Pv5g3kvUuFZAM6DGHCVbm5mUhjgcQDdG9ABFu76d+7cXlunKKmqQ+ZSIJ25icJdCwD4yJXTqdZEQQDwbr243njp8zde+rw7jKzrMVEQFAjEiRcMu2y/tQA0Ms7HeNB7Z/lxzwA5N4mz2a2ZqKiWAE5bH09YkikSCN44jUhx6mlBcYF7F6mltLYxePN56I3yUbeaQQ4n0VNFciCZHB1PrqHd3A3eWx/5bh9IMQCAqk6FmKxBCIAsO1Rlp2MIpZc3H6BQhgslTZ8EOYaU5qo81KTD4VS5/JAkvaB1a71F5Am4ceMGkoUUu+gLp5iHhLtbt+EAiSAbq8Uz/PSXf84C693p0HDwH716iQP5V+UGP0GpfjAd+zQff0/FO16hbRb9iCwspUCbizpcVOFCYH13m+qc1p8A/uPPP9/6VpgNd1WUtJvg/v/69b/LL7p66RQPn1fUlvePx6EyJQHJxGCklRytQ+HktQs0BrLjB7+Zu2Hr6pS1TM5AaWwtWO/BRTsmgbZqgjpowbLCSNMCRBZaTrWWWqY6uWNdVj53gLHz+X2r3xLNxoP8ouG7j7oLgjCT9zxLKM2t9baua88QHGLTx4/x9pwRhvQ9ctkoNU4pVa26afJ5muZ5Zt/EiGKkKxKSlfFn5B9ogLt7lCy2bi2jBcMy6f3555+XzQUbgW4gW+zsKBojAcETgbdxOKGz5HHiStDqPjk9zOmrfKjXY61oXk+Q4StXrjz64ge7xk3elxX98tde4W63oH2Ow542Z9ESqTy4uylQpgJ02Ip2RGvnZ8dbt269cOumJKbKwyAT333VYZ7mKWAXcNqoJoAW/dk//0sABDLVOs/zPM/sUJtEPnGm0ycvCRzgncdm653dXbr3LdrKhw6vMAJG7u7ezXs0OVjbyiqJwD+KKBmjhw9GKnazqP5ToX0qjBn3RptcVefEL9VaSIAzzzNrlWPDS7rw6QkOE113LkTa9AiYSJQXhYiCQLCWXgfPNt6LJxcgw6fXzFwVVtLXPAAIdZ6mIOxm0haAO0+Nlr40EBGCTF6DIxnppeEPX7t+3ceae7hDEQDkzJYBCuHTjsO2A/jSl3/OwTxECD/PktNpohvczx7q6PdhXxdgyU+Q4Q98XbDAJc+/d/ved/mVRuuWv0FqWAiM8UdmC8y7WakTIMTrQh3mqqJaXrh1S7Kgb+D7i8rdV14GcDJPh6nucO2Aw8zOz483X/q8AAQKCzs1qGoplRwUGj5wbDDBsizMOKxrQ+SNqe50Px8yTvv4Qrr5jFhR+0bv3+GUSlSeF1FKSijwgccM360od37vdn5+tMjEbmMrmaJhcqfUUioZZVSUUbrgD1vX1cxa72FkZvRMuPUJAZdgDHM2QU3bFvm4GrzqZufn5733SrFkX1Z3T5JXaBDqySiOGAfD6H8HFxEmgTgL7pGN4GS6+wB5I+m1zN3cBLh544ZAJPm+wkSn/8ApkXiqlqWVnuE9Vfmj/9afiSnUaBMOyDzVaSpwfPbzX/igkvJU16MamJ2ixvVhyPCF6/1L7/iB/mGgisjhFlUr7hJa1WstaXIBEEhpZhlt1GvXr50vq5klmzFU9eZLnwP8ZJ6maYqCCa0sOVCRT1y7hRGaUiCrxMkds61mFpDTjh35wp5ea2a8JGNPycghkuWQW7ENgzZhRAdN6mB+p1umZGgOM17CjI9dCwGCK3tdV3rt+dXb2bd/YHrakvU4/FOphIKY9Z5hKdnwhiWz57xaC8RJ3t7TLIkgXmtNAKr+cUh6Zn0BJGWJqjLTl2oyI+xOrDk58tOfFZGo8orOqFsUkWEzRI4+Fv2FF164ffs2sksNh3BhLFESrFrCI+Y5IQxwlmkWEYvqfwB+9fTw5tv3P/f5L3zxj1/9O3/3v/wgkvFBr5OTExHZmgY/Esd6X5L86Jv3rzxB5z/h19u374gLgytunqg6
sCBMk0/H4Nbt/v3zBw++RWOKLXWUVXipwT/96c985Mrpa6+9lkX2uPXSZ29/9feuXjoJiYGq+Fi4n/3zvwyMjApGaI+xjezIzbcQieGRs+QJ3rszgMydHDa3Dx8diNAX/dHRrqVbHySTyLH4DjWpKuva3b3WWkR7ALwYCgJfn6YprA138ezQkFbxY8fC40A8eMJqjoXZpqKqIwKLMSfZjq3UUkoAGCX1GBkGAazr6mZTlP4CmflKAwtgEWVRkcJcwu4boo/7SIJJuCsRLnQndc6IzsUeoilea019DhHcvHlDs8DQHtrlIcCgxRW1HVFF3M2++DN/CuFiNevNokGznMzzVOtnf/LzeD/S8n6vCxp4X8nAiPT4df+2R7EcYw9duC4YzI8K8/jgmKV3e/OFX7tlXAdJBUo4xVSDCLGwJlVKEfZ179Zd0N0SURsWE8Owt27doj1Lm/5krpdPT1I/Ecbh7sYSYgCIDiwAs6/B58qz4+K4NUmuZEtdx4hsT2GTfFSamrWOcGzs1XBEyW270WVQt+3Lh/IfM1vX1nrnbp+nCcC6LGDAFaEZx7eHVs2xhHmTa2lmqlpLPFhxVhcoVJUcdADcfF3X3psANVi3ApVG1gGataQMMGCaZ2p85NzwgMGOVah77+EbuFvgrqqWUevABwijOXi97cK+VFWtdXjou10qquXmrVuxWiKSwGhzJycoRllZ9NrSkX1+7vkbnsl6SSD7YapTLUX13/3Fv4A/qOuCwfwEGb7wyoUN+5Rf8ZQ3f+yv2ysZGSq1cn7pf+W6cJsBiHpd1SBy4NJY64B3c1aPvvjii6WUFz73+WcunTApoKKkX9VSIPKxay/c/NznkRU1RAEkbgRDH44njKiwIN3LErUAgIzi2f0FB1Br1V1rtXx85m/FWSCg4UtblBt6by1OJYhmyqOWUmtBUDC61iKk1DHbmjaoQgaZTMiguxssnEwHNviTR6wmc7lEisbFNxciLchSm6HmTUWbWbe2NnNnUzx+ZSdlLkeY1JA8hGBQjVYTYf9To2bXhRKmxrY/qKKR0RKIeBo2GlTVQ80jzfdoBjW8DjiEO0BTXQNjOCIC9098+rqoDpue3zhPdarlJ7/wUw8V3Pyor0d94KeR4fdlQr/n9QGkF8Bt1odlUWj0osodae5RFRgpBXVHkeqg2Rh2qYrWaZrqJBl0dPdr167dfPElB2pRczfrvTW37uYievOlzzMwhiSdQmyeLGgDSBGVOMTYbMH3oNneJVdVhn8WHS0ddPWQUIi06TIYHDqpSLEkxKPUllprnYYm447svWMQIZsLhP10j8djfJ3DzHi8WJZPxYlsYNS9VNWAOciI2lYz9+xUkKHjjBwh4nfu6a3k7JiZFoX7ui4icjgcPEVORFhuwrsFAMMiuxXvycLLDOX3ta00I4oUUTEWa2wdGDzaOwBuHZAMee8ShIRLm12/fv3Vu3d1G0NOY0S3BpF/2tgOM/vj//bPz7Ucj4u7TfM8PvHFNVCyf/EX/8Lf+S//q/cjFB/wGj2K9k7vvgP4/k8fxvX0wizBABNqNaTYxTQV3AhSpolpO2rEEWiUEK7NdgVw88bN//R/8b/8xitfdXdRMbO2rqLl9PT0p//Un+Ob+2a7Mtkf5nYmYuG+9RzmlzbCjxH6GAqMNgMCN0//lg3T3KAw03zKMQNjONRVlGdIPHzWNW650mEUABBBrVNQzMJb7zyGxnAsnvCh4bi7SXi/Qw2RlCzGZ+7IFuwjP0tPvSffPIZ7aaO7uTIgMaBwZAYDg2kQbA7LJjN0BpD4jVIqhivDt2Y6l0GpUWmE5B4IHGk2y+Dzi6pbv3Hjxt27dx8ZDn8I9vmcyuDoW1v7+V/8S3Rrrly+LEkOfH5c37r/oJnfefXVp9j8H/B6bFXwk2V4O4B+uMsfLo14suK98Mrt23deoM8SeT7GOwYmITLqQGRxoTqwsTzIqXbiYKUgQbob3Ivq/+BP/MzPfPln4UZf6/j2vW6OUn//9bfScPXgJIaY9xBCd5dsoULlu6sZFJFSam7G0KeMD1G1XhxO9rjN4SDe4EDQSqe5DtDax3Bn0pSz0UKEkEwwIqC923Jcaq2ef3InN+B+OHhoOKAeFTJ7VDr3rJHd+wP7+M/wQsfxyVPt/Py8tT7PJQhK3BnBLxIWNZ+4ewfQkwWWt1dRN+/IQiVkVIHzI8LhAZud4+Zj2eCWkHZ0ayCnT3o1QcTz6qsZ7h6YL8AuxmwkCU3unZ1fngu4gXqE1i6dzPfPj4Ds7dgP9bpgP+9l+OTk5Pz8nL9evXr19ddff783fOzFKPT7taK3K0iXHABh8601hHZDFui4ahm5VaqpKlJL6d3GrW3UmQFTLSEwWlEVDtHS1/N3Hpy13sVMCVuoBVkJQMqk7t3cJI/u0AXq3ntMKR96s5+1FulhOPp+OJJMjAi4MVnvDAbuGRUJwKAEpwv1036uHhKf9CsBTNO8rg/Ozs8uXbrESHXS0W3yiNiwFrqUNnU3VaVYafiOmVHjfz4SKemKjrEy6dfN2JTJ3Vm3pSqUXjwcGKBR0butK1OwFuwntqHShvkd43RybQ9CExmYb2H6i9Gx1KIqUdK4PwAA3Lx1czPWYl7zEEvBEA1wqLm9de+MyYY40STSm4e5Arh959X6oQGzLrjBF2R4/6eTkxNmiYnE+pFcH/nIRx6V1adRxUygChDmKgP7btknpXlgA0WlUKsJYNadrQklONQRtBDeW+519zpNtRaoQoHu6MbPv/XOg8QR5OqOuhXWnGmBY20r0Q473E4qf2x5kjFYN98FqCOKOWzGTPoYkEXmeEgZ86J8YHBoIQAOw9yDCAuhe1T2o7cB+x13wQjFeZIlm9myroHWJxW0mbkpJMoAMKJKmQbqlBNJTmZCrqM0z9e+llJOTwlYCopAf9i95LzQzXbWM7VuFi3fonY3rSDPrHJrfSP7F4maJXJW9267nRTxsbGQ+RF/eES5DMweh6vcaU3kuSiQs/Pz89bn+VDrNCWnl7tPtdy+8yqSaP/Dvh7Vh48Kz8nJibt/6lOfeuxfn/LiB5955pkny+pjhZmvvHL7NrmQa621VtWS7fwCZ84qGpWSHowwdtotbNF9jNfMj60ZXIuKu2TaCN3hBiFPk7RulRlEUd/Bo7DDCQMAxMxbYw9a67HvexSujWxIVNlFopISWIrWWqIscQtow33grnRgL+gcjy6GnsF3hJgFkiO2rrtAaFqKyjzPLJ5n/yJBjNh2Iac0vxV04FtnbqaZwVEFUvIAM7i1rmT69WhA6q61jEydC+BmItLXLiKhOmR3DjmtWUfAMMKywiiGdtRsDO6RAIuPttb6KHsQOLC1GpcsmcowgAcHFjx3Q17pC1g0CpZksaZKpT7g5XSfSEpmfn5cSy3Wm6qK6OCbPkz10Z39I7weC4f2h+1nPJwoOj09vX///ru9/+mvT37yk/s7XLjho4904RWW6fBnVXHyQg04HRB8obsPMr8ylExr3XONyInFfXU4OaAUiMINZnAT0bfvZ6PzcUYjtpGICAK6CaCW0rOQzh0qzCw
Wi4K/YYsFAb2I+EA1hluMwj2QWcmxiwCMdDHSHB/GZI9oeYSEREqpRVToA9I6pohNdeqtr+t6cnJibpqW5jYiAUckwFRrT+Zmz9rG6uRQMOut0zofyZVRsUFy6k4iDHMHWltba9M00apsLYuzBLWMBn/eei9FWaIYEfDeZWeocB0bsanMD2Wexz2aU1i2U+D/MUiwt0w4YE/WWMC7A9lNoqTbPFyvsI23pngeb7f+4HxRUXcLPLaIiJzOh//6v/m/ATg9PR0u6B/MdUEmL/z6sY997P79+4/trvTke46fP/nJTzKC9Z6y+tjDK6zW4GVK01OgWiBhTzvlGEK1MqSOzHRmxgJ3FVlb6924uvOhlqlSztAN3t1cSr13/4zsMbXWZh0pRVvaxn1dW6nFknnCVWOl1SpLLAAkAGP45DK2oztKavWxU4ts6EOPNvRIpQ1AOKKw7nqZahEdqH44VBRBJxFBGaoWwNn/ZJ5ntj0AMHAK/JoWzQAdTO6oBtJapS5rE6DUwibAdBSURFtb/Cikl33NVaSvTQSllNa7JBecm5VSk6dTRDDVCkDrxmzSsgdLGrYAUKfq5mSxqrWOeXS33huJaafKO2dgM7szuDuzUOZ+enLaW6MtRHb4qVYfJMCDR1rT1Ed4KiqydhPI+bKeH5eoBc8lhGAg6Wqt7UOg6RgamO7oE4R2L0gicv369XVdHzx48AG+9MqVK1S/H0Dxhgn9ym0RaSurxfpUa50qA7rbJUyi9G79/Pw4zzOVBDMUtZbZEMlPBo2CCZj4ZsA7URJQXa27qJO6mIKXG28EQESEno6WzV5DCeg1TdFwlWtR19h4Hi0U0i7oZj42nia8fwuzCBAbr5n76cnJ2HisKBrZaT4THt54TIXw9Vqn4/F4PB6J9CylULah4QmKRM4VAKsHweCZCCBaa52nqRJcQhOXaeueCC8ge5ZCi4qg9b6sy7o2Wu2S5wGPMNkt8KM7o2oE62REJCXgMUi2rs2QgWCUVmcUWTa2SpgFi+1Up3ma2QauqLJ0cQ/PROar4px1ZyEYY3IOsA2Nuz84P2aaKgxsuP/eV1/mrT68ONaFibrw67sqQPcXXnjhE5/4xPv9iueee+6ll1569M5P89X7V1TDv6q1OhAxilgrmMMNrVtrHS4nhxMR5XuKlKlOVcqwpLSUuZbeu2a9Ltx97XBILaKyLjwr+vF4pAe1hT9T1Y9nC72fVDslVdhDG0+VpHOe3qkkboxqSZPnNT+FYSzTlp1qnaept0Y1UKdpNFVN01XYN9clCmyIQkaYgV6nam7n5+ckVPeg9oRlB4YL0z5Ocz5IFfHhw5iZWQfZK9h4BsKELQPZDPwej+dufnJyoqWAWLbWiB3x3stIveY9+dABJfd4xbMx2ggyDSrZ8cEYgMjg7+MwZNxZgibEwvc3CUsHIkJSpdC1Ox4Dz8nl/a0bBG1dBVJrPVvbsyw57J2+yu995eXHicCP+LqQCr6wWniii/vcc89duXLlbvYles/rE5/4xHPPPfeBFe/+urBMQ2kAcIcKjPSgWQwzdIOU0r2LVhJiOXzYw86D0h0qMk0IMh1/cHYuyG7yORXce2trGpcMy441PYHwEOlm4gHHiL0Hd8AERPLvBxU/ZdaDouiZDfbMJ8sIOGeHIIePcpqAb+90mojUWkdoiuMuWtx8ba1OE0dK5e+tiUghaWTeg7M9BKqqlKEtSUyZ58fILGW1Q+8GN7O2NhE5HA4qwjoDpIoTYX2HuruqtNZpzWrRbt3Mp2iVpHwULj/DgKraepDgjWfls0XHhtw9DDaW4J1HThxsY9nf8liDsgSjSBiAYCsiiQokB1BrPT8ufTv8wppCys+H7QY/KlSP2s+PFePLly9/4QtfePDgwfe///0LFvWYukuXLp2enn7sYx978pc+9pV3e9vLr7zyws2bVHiaFA7bSpmbeakbotjdtZSMx2r4Q6xXC1pvTEXLVCI2VARFYY7e335wPmC5AgiBx2mgRhNZMBviRWVtq0AiW9M7CG9OROAQIdpiEaNVkdBnPgaV55GPIZhZqTVBV8I6+KyfgaWOGWlYUXEzrWUIFP0IQMxsmqbe+7osly9d4nkxBCpnTB8alCibB1m3GqaIO3e5x6mwAxID9H55UUvM81xqZWxxL1oChgybFI2vyXCZFkXZAJUx6d4Hni7OcoPIYBWGj9YKtLLYGbV3GcdSBoo5tYiiyMcMyt33tH+eGS5PzgSC15fWHpwvlw6zp23wlZe/tt+1H54bvCzLSMnKbsfjEVX82DvwfLl27RqABw8eDNgWP/Ko3L7b3d7XK+SXjFhxpjpj1Rg7LKMeFgAL4hy9i0jfdk0sdGiVabRKohwZ3Na1LWtDcqR1MxI8jZQuADNXhfUGESj5a1gzI/M0kbf9gqbt3mXw1wBottt+TLlEwaAnHJC53IwUM50kHuGZkNkR/BrZFjND7yWbG+/S5k4GSADLuk5TFRU2uAgL0UwGYbWUaZpGxdI0TZWqLxO9GyyxW88+b8KyQVURKatIKeVwOBTVaAOxU5VQLSSmBAnWQ2xCAjNf9dA+cPRurLpEMqFwVUtRFKVCXtZVRKZaw6T3sNSH/FKNU7Wubf3GN74xNHyumW9RRxWLKmXRWrhIbIhoZmfny6V5on3l5r/31a+Np6UJ9GEI8IXrUd37bvbz/j3j39PT0wvQsUfv8MFE98KLr9x+5YUXb6lomSsSBnR2fn6YJipeS41EnCLdqN7Nei+1mCCpoe0wT733WsphmM2UIlGYvXN2XouWov1wYLfhUiaW3GNnW7bWiGcKFLCZmddSCCjyLNDnxQcirCKxurmzaCTXwFEDWHMHikBdujnUdQwqI+rcuK0bi373kRcGxpkiJZsHABbW4eRkWZa2ricnBzMqNscmW67R6JOHhXczhYp6RdQoM9g/7HuU5HM0d4VCMVLeqkql775vzAuQ7C8motMgL7Xy+DG4uhh8WFP8TrZOPjk5kMA33A/h7Mdyj/zWRiaYieCxpXrAVgJYZ8lpQhEP+Ic6E8BDrfE2Fp2pi7ub+4Pj8jG9Cne3PnKNH/bFQPQFdsintJ+f8no31f1uf326V8CEf+AUARU5zLOZ+drSYRELEgUA8MRDm7kWLaUsbeGui+yguXcTFWgBSQFU7p8duQeiAKZ1zNucSD5bcgZXKpjjstSy6xrkEaAZr4gUs842DvM8D+eOjxrpMfThc62tbRsY0aIo5oLWn8EVkurN3Udkzd0dXYO52hVqTDPDaX9Z721tzLMOlc4PrpSmUgDvrXezWopMk47vYJa0R/nlLiqfzSPcsa6r04exgHRhRwpp7qwb7r2fH4+iEiRjWbHp8JFwc3d2qRLRk8OBsTvPumpq5lgcjbTEmFbNUmZOcbe+rMv9+/c8TLjYJR50gdEtXh7a/Yl6d9APIYCAz3l2XNasWXHg69/8/f2W/bBB0RcslAsy8+grP5JvfJpveewraeYAlAfVgFaGE8lwQxS60T/srS/LcVlXOk9cr1QYSvaIgOIZmwxpMz+uq5kjOY+5of1xHAa0ZLvZcV
... [remainder of base64-encoded PNG payload omitted] ...ElFTkSuQmCC\n",
- "text/plain": [
- ""
- ]
- },
- "execution_count": 6,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "physics = mujoco.Physics.from_xml_string(contents)\n",
- "pixels = physics.render()\n",
- "PIL.Image.fromarray(pixels)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/avnish/.local/share/virtualenvs/metaworld-7kyDgMie/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n",
- " and should_run_async(code)\n"
- ]
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUAAAADwCAIAAAD+Tyo8AAEAAElEQVR4nOz9ebClWXIfhv0yz3fffWu9WnuZ3pfpmelZumcDCJoAIXEFQcmSyZANmhLFCIqAImgpFLLDkh1h/6EIhi0rZEdINE1RIClTECkGKFGUIQoESGwENDOcpae36emlqnqvru3t273fyfQfuZxzX1X3DEhQEiNwZ6bmvfvu/fIsufwyT2Ye+r/+X/5P+O3Xb79++/VP54v/px7Ab79++/Xbr3/0128L8G+/fvv1T/HrtwX4t1+//fqn+DV89nPPxs8KhcYPIIKqxt+o+452PxMA6v8Yn1x8845XPDoonH7m90Eo3uy+oXrHZxYJEXVzarTuQv2jJ3U3Qr563fPvfKwufufDJ9UROv2ZuxNSVYqPURKKr38ooe81qTu36RQhn1RP6A5av2lCi7yH73v1vg/e+7626S5vfvQ2fQit3zom/1BC9MblN0REVaEQ9R9AC/+xnSGQqooKFrjC/1WoKig+6uMiIgIzE5GqioiKKlRVVdTnRyD7XPc4QFVUVCkf05Gzb8Z7gAYh4viA1Cq2V9/XpESA72NSPa2PmBT5tNqkjBCgIv8kJxWPI6ioiChi8B05n1Rbh7tMSkXkfzaTMo74vnYqJxUzZ/KXiv/ne0zqt4L9REW+b/YTVf2wSTmhhUlRvkCqOkgVm4NAoI0TiJmZiZBmOfbUh07EXEpOQUSIwEy+qVAOWgi9pxLqJlUAgYiYmArbGFUBqFRVKOVYCjM52rctIKLiT0ZusD3QVswmpaHg7E9GKB7V1jeXiXlhUqpqBodjFh0dAvkHFmjFJ4i5lIJuwlKljdY+wCWtjaiSKjMHdfVlDoVo7OSUlTR2i5iYmBstAFq12phsC7gwc4FJDKAiuHMBg5xAFASbLADKiZ9aQGfmqgT1wTATl0LEZnbkjgVMcXBaAiLnbwaLipLGAhJzaVKiEBFiElEi3IUDVaGaHGjjb8SYIKioTpcFLjUwZg8ONB5UgZBoijcxFw4O9AUEE4epWtgsFgZD4jMqKpC2gMwuFgqFFqUaDwRgs47lugsHdnJDxDSgg0Cpr1INAJBgcvvXFAMRg1mREqeGi0TU6DIzUl1p90GcViROS4HQkh0tH6+CBC7farSAqkYLzExgp5U6wngeLpbNPqU4idjnE1Mo+Vd0kZZ2tAqXVEhGi4ldy8LVcNAiVXGVvEgrEZUAlIujimZhnJbpCDeaADGrwSUiBid3ArYp6GnFUijAIgpye+KITqSGbJgs+IoFx1C6G6doqSkXXwHbrbBKEAEgrhtyI3paRAQGKGkxs2NAgJgpqCNw9CIt+x6EQCQAyGRY1bZMauJzJSK2KE8wBTPb2pgwoaclavbed9atliopOSeCmCm3TFRIcopETfWmUgiDB6YS65xWiqgZAG20KqlCmYkZAQG6BQQgxmNGazCeVgKpy3THMSrBxwaqTLPaZiM0obFUs0VERCQqzFAKhnFeMFrkluFutNzUi1CgP5M2fCQtcIjmAq0wIUTE1NjTWTj4XNQ/BahCajVlmvNOPjbRNXUQGiJoxfou0kJHC9L0i4/VaBGzhvTG83zmjZZtcJJI4NZpSQ1roDlTVWa2gZ6mpaq1Jsb2TyatbhgfRisASK6DMtg2i+LzORSbmr0vrOV70UJg5kYrxLAL1dxBy2RYBMEeQsJsljK+iwSwcPVAgVdjhZOWLTsIWhUKUy6hIJDcCIBIxCx5RysAudkD+z8Y2yctBMyBsQdBa4UqmUoyQxh8aLSEhBw20JAEyBCEtLG3OahUqTZws+Y2yk4j+qvtMaC1ptVzSWKCgqDUqZD+RQRx581VIzNzIRNL+R60Ot+i0ULYqGb5klqKqKkkByrkW4PGrW3XxSWh0TKVFB8IcqdI+VtqIDDQlO0lpFajldycdtvdsG4ZNTVFblnHbkRkrqAChhFtoQgoTKLOc+iUGE7T6l00M0na3LAFznD+tD9rVWVti9L5F05I1dQiREZtstNoxRRMyS9wY1OhTNAqlZgYjLuRIoPT8aq1QhadSyK9g5bpi44VCWAlEa2kxMRqQpVqq3vl76dp2Y6Q7xzCKyFzG7QtIhEEpKJVq9GCCjGjh2wdbRuMEA2FI5xg/+EYTWyzbTmDQQCBmU1V64IKBuAhEB86x3ODewVg47mYswLgDoGbkbG3wQjHzHgKpBDn1TQITs6hQ2iyAIk5Fp8aIWff/mtoP9aamQJCN9WKJtuLUwsd5FNLxQxV7oCAKYMMH4BU1VYzdKdtE1J2v4+pda4hoKWbWvzL4Ai4JrsmeI89Cyr5712mRrQwtR53qNsVihGFRTZl36bWy+f3mBq6XePwA0PdugkpHLpXTMsHmyXoZ9UWrfl+d600WqZumAhc3CuBkkK1uN6EqjQGz9l96NRocWp6amoIYExQFQhVcu8vIjZQBbPxbS7jEH8k03dp5SisBgAmVlL7wTYpfzC1gDDoyZNtYYLLfN3yEIKMpdGTM31l+2Hzta8zu9/RK1FqgtetoStsNyxhf0ihp6aWrBQQxgyREhMTWRgHpABZYIndS0SbWKp8dc3sgzMH746ppaZwQKgq4nwlJPbg8GOazDTu6GcXI2EiSVAZ5JJW8DOS14lDqthBJ1Nz34wvEeoZWLBKNjBxIW1TS/ySOkRUWJiICheLBamHTuBMGePXpBkCaCsZwgCoyh1TIyVR9z9FBdUZEEwWRMiVpFhJQjc1NHLJQkREUIkZa0zNDKgtsKhCxJxNZlaGmiPpkkC9CJyemuvNmJqreJ+svVlKoVAWsHAGlIlt1qrmvrf1IaJBRPrtK2TOPRQQqU1kkCH5UGSp4YJBkV5BDvmOV3ALwZJInJ8AoAY5BzSNk5NE6PnUmovkUk3kK2gRcmqxplUqZZDjTnLGvgYYfjPkiHhhJZNcrQpl5vQOiNnmGYTUhuvkEr13pvYOck3L30nOHhk7zXD1EQo3Vic1nap6+Plu5Hrz7Gwan7SXqFhsvJEj13/mqd5JTj+KXJO9kuQcXpiX64bMDBGYzTIws4o29BXk5HvMrtG7kxxZnMVRB3pyVD6EnAUjvxe5xJlJjpjsYDI/a19wYSvGgM5jboFVjfMoeSYjMEbM1EAphXob64BK1TwrEcQsoN2JkS1L5zScJuerpG2BACIupTTUmqzi+y4RMzxNTrvPI1AZM4hYHf3Fii6Qo1KGU+SCWiOH74ucunpujll6OxQhTSpcLFB/N3JxMBgCmYbiDnKqqu7UNP+jkTO2IFBxx+eOr5tS+j7IqcKC6na8CD8iatP22cFFtcQxhP2e5FTVTi5BAAPSkXOrS6m8yIEdEVEcp6KbW/xGVHqAzFggZ9JuEYc26O9BTjVFNoxKrCwRSqep7yBn8BtE5JGqMOkpCx9BjkBMrJwSqAAV++LCdji5wfXOQhxAFRpHI/7EpOQ2adHjV2Y2+RLJ0yYiP/bqsYrNIkYQXwfacY6hrISRPrrgGN8+ppKCbyjGjTYzkx0DdshEE8t05iLtW8Rq0g7D0Ww
8gcFSHC5WceWaVpRjOjlBszy2+IGdes8xwHsgt0aOAAVr4OkwHbmeduQbc+l1oi9MHMiGHC+CIILFIJwP4uDUoL9/tKqo5nqCOQ5aF1VwLJhprB4jgsiD8GnUwmV0pqRC4l/SGsuS+O7Uemp4Q413O+vhs4oYij0k11MdkvvRMWBeLO6ynj25nlHVT3JIKQ+O/byHtZseZXQg8i/MdKhWXVxPInCT6ruQI7Kgd7e5VWrhYjOzCXKcsQ8mJG510U7b2qPDLTcoanpBIOlWkGGq7uDE1zHYVGP5093sJ9BkybWHK4yeOcPh8Y0kG7DLoTtLEUGOQE1PMSYHtNC/T1C7CeYOaugSX1ew+yph2voF+Why/QTvfEUsI0+ITAgLs/YCSGGver+ko5jc108Q+YjgqTAh8UdHYeR2OtSZUqAb6tBfZ7sVEebpJ9hG4bONuAX6sJetZzGvUomo+mmikwifIXSK25Z+zVT7sxIEe5RSuiHkyImISsRKC3O1Q17fO5VIzmnkUmvkmuXs4o1aKyIsgsUJBieZz1/AVTq4l/ooKTZysauymO9k8la1FpSoXWgTHOzX/gRSF/Mlm6GPc5c797R9hTzQlRa/n3mymy7ydA9syB3DJglJcRE/IC1hY2VqqVeLFBe0kv0gakfNvfTGVgQ/NaaNEecEE8UtmjkXl1PLmI9dhH7+J0u8oFO4UC17JBUTQj3dRRFQm163AKpq3Ol/8W3LKZqL2qkciHQTxF0nGOS6RUUgsp6FVASlKJS0+1Ng7LYmjVNcmZ4iZ1OgbuN6cc5N9MmaE94Go8lJJi6iAFGXSYa0sU4sDm/bJnpUjGAncyFUdhbrc08Z75aUDPZbKIKatTAS2lMkQiSitCW1/FA/wXBvpqA4OA+EMeR3jI+rVBERlTQyCogKYwFp9JwmIjVCf4ihd1GO2Cw01Nej0Fqri1NabEUVOQWlFii2ZfWnN7AWSxMrfXeK7iD4Wli4z2IzPUWcoti/4zGdoJgMnargzjlqmjjVWqsd+hvJJr2h88zxPiXsxAFQQ4t5sle3VgsUQw07xcK+LxHNSYqIFK7TFOnuFJPbEComM40pU8RVubTDxFy4lFD3Dj6CYlNDWKDYzdGeqap1rEJyB8U4FrdfTlFU+PEouWBp5uoGxRgVEZFWrWoJEeTLRbRA0WQ4KKbx6G0PMRCJEI1iwKxuHYjBQmLRVmhsEFN/RDvkOIg8/OWGohlOEYsv5nPdDTPA1hJfEkP0+8HU0rU1vd8uWS/WNHSvqpIATRSoU5luvO9KMZQZRwIQQp6SoploM1NtjirVhsIfQlGdYmp+E3tapFhPmaBgR4TPlhRFBUSiymLHCb5bIb1GUU9TNOhorn7oHeO2DiQHO8KDKMaG5mmILTv7YDz1qmHR70WR6BRF34KISjpFQKB0B0UY5tSeQ74HRQC1RXQp0Yw9KkI9aija9tcpBoXMbPX4U7c1C0vkwRZnV1PoC+iEFij2HGUnO0lygWKLkX0kRUBqVbSooX0hIbqdJzlFJT8RJAIw1MiXklo7f00T2aRrjlRUESIzPjulpRDokUDErLW6ADctpe6wS6QHh4wx+2mHiBKytALCfkSh8MRbdJrYFHBQJEhwdnspFOS6pMESEfHQuv1aNTD1ghPYRNONtvG9VqNIRMxSa1b/aDdNQ2TxbRWxzD4P7InbSCJPgGVk3m+HWQJbLOi+Vi/kM4ypEknm8vp5kgeOY9PM7vqCG6tJk8agGBJlsmHpQS1hq1E0k+vr7WZAtZRC8UD1gAmp1ghrItF+LCzlsCQX1o2KyB0L6wcB1LknFBndMUMAFhiPk2dtMYBehmPB7ZPieRsd/6j2YqPxV1FlIrB72D5+O0jzmoqgGAt7J8VOZBYpwos3XB2oqmoVsXiyuv3GUDO3bnH/2oNjZxDj6+1MKLwMSmpOTxUIkGMn4PYck6McZT7N9U1+PaZK5OOlEEBklDtschguoLYnJgp1UaynKZJnF5ADBLhnq7ENp6dpf9fmAWKRI2yaJsaWJolurfIH6uYYO+qsH5MKjbawD93CNrltOjQl4BRFdPakNz7OInebJgK75jRH1HzgwjRpIebfPiNih1sLxgdQUk82zEcF0VRzUAiSRxpRNS+GiAhjrG3PtJ5ZkNorDcDiNFUbDEii9sNY66k31Q0s692YVolKMFKPB5uHHPFddMm/3YK3TMmUFJumnRfoqP0ofcX84BCi6qmUOYFTL1c5rrdPJ4sStbITh7GuFjsPgcLPivx1IhqIT4U9+hd3x4Y9Ue0yyHqiAWZ9ySkwbSFWzqBmO584RVQjs8LVQMRC+pm26pqFIJQfSiBY2afpH6M41bgL0QUFZHyod/zVDG8LW1IyrTQVRgaIcm0p7LXqIvA5TVR7ae+nSeTowSdsql4k9aaNLYmyWZy7TTPDE7mhtk35dO52M5exyWWnrAlAKUm0pQCfItpl63V63ZnHibYzjruurQfGPPkpeNpOKKmrmmg0T1kUbXTtzTunGUsOhR8c2BPMVWlreyp+3FG07w9L06Vaa/Wq4OCA7lX96WB2Ng4MDyPIhaWKVCt4iKeHV6DQPEb2cmEQET6CKKjJfyl+eCLh5bsTz+REa7VCKnUv2+1MCxH1RMea8bY7iELiBPv7IwqiLl/FiGatbBIFaq215/42no6oZT7A8mDvIDqKyALRRKEEWiTqbCJGlJJm5Dl8NFFzPoNolSpVQCCFcIZFlEBeFWyjdKJaq7Sqo8gcSCb2Mz8n6uBygSiRra1V0kYUK4i6nmD/QiPaMVJH1CsHLTwZySd3IQqyaYqIKWRzdJNoochfCqIKlbsRtVcjWtJmNKLpQYhIHauqgtTcE5d74EOISh1FGgbxg/OhjlVEPMmGiCLjqpn1/DTccekr9QHUsar4yRhCBaXq7QuU8+91tHCZ4Qxi/lCi6SbZ2VSzoqM6TwNupYMoCFbg3hMFEMFn869gtfvcESUmmFcTRGOnw7zcSRT4HkTzVJZMi7u8JahYJJrG3OX7LkQTlbiuY+5qze0VSTipxS1S2o7W70bUk8adqLn9UdFu1abGh6lZCvctBBLPqns/UGYmDk/YSnu6nBETv0bUKumSiUFEnvDsKsNr3Uu/vs0HUTF75vVr4YJ1/trdifZg665EQXSKqH2S7ZNElpoa0YRGtMmtEUUgI48sgpi0uhtsB1QaqfsgUF9yCcCD/SRZQgEi5sHY2rCauqwvOgbq4UTk5DsonwEhJA7vcsTg8BmRg+0Omycn2vdMIcHP/ZERnI4oopK+oRf1nctATlNK8NlkPlNfTx84xP2CVpEdgLMnysRWAu0KWRVAFqArGk7ONYnEACw2JxAEU4goQn7T4e+JEpH3J+gQIJdi4RwXhlD69i1xY4sMuSV1kIoQqGW8w6MG2rmivsKIRJ0208wRIFicnGJfqwd1FlbYxZ5UhEHKWHA+rYa8YnGFI5HDM8OIlZMoUciVm3GRYPC2wgjGqyDjowaS46/ZUQAuY/kBFxub7IcQrfZez8OxO1JBrFBFJ+ce5rjLCrffQKBSSmAWsvKSJG
ybin6FQ9NYtbzRGNxyWnAVfjjh7Bhsqh3sBEHTfoTn5nKeHjxFFC5e5m3FOotZbKv7cTmu1UKikqGTQNEuqJA+8aVz4zwM6tuSRRuhiDRCCM6L4rhIFX043ubYWkMYUZKcFHovrnuXvCqBWkZZ5Ou1qVRBgDqI1OCY9omYiz+Ycedk3T2NFzMRMzS3VlWbDrWvxAEMQ1R1IXnozslWrf0BY2yca0SNlV6crGbPBo14qxFlbs40USsd06b7GqefnuzppG8QERdGKENtwYcmIDX4lmNc1E1W42DC6bIuTBYWcEFEMalzeciOS4CYbDPnWquQHUEa0XjZ3Fv1i31azEErC+JDi3EVNKlVVfSDhsMhjaMBtUQOy4Y3cbLyIAUQ2C/d0eQeif49Ppvg4owhLXCA/xz0jZUCELrVDLBgR8pJFF3swb7miCSXkAjM8GTo00R7WTO9YKqrLbJ7km7ZbblNqhXePyUin4bTHEXaAicliTSaRbrOv5ZqFSRtzAQ07LCwyJ2FByz6gVN0JZZUu+BqA8MRFpZkqQ6WK5pR6rWka/2Wtds+aZqxgQWrn8NdFhmIo340rqPo/YDGDK5jnWgs4MIix3SzI4pUIe4niyBLlhWl3aAAhaWjwrdYkwGoe8ftQ8dUvkvBYqomdR1oaJM15Oy9LpSiX0c3ipxvmIq2yMCCENlhOVFG0vIgaoGjiMBKgNRRRZl5gHvzEUVQBN5Lw4BFXWXWwH0hOyyx2Uai9qJ9jiVKgVxsuGEtvnCKbtucBesHApFa6zSka2MJRgu7p10GvD9Oa+Blk+HiJTWNLnIHk64JmGlJP6bu6PZLDIDNvenoGpLX6FdmSqOV8mjSxSLdpratTGiRbjlFl/r5xuKHHs7P3BmEpzvm639Td9f8XDq0Bw8DNeGMyp5YqeZkktthzurKiGDlTBeMlYamUe9YkQXDpkaN+TTV0wJdV/Eaj7K8JVJPHukPqPt/G0PC0x9P0424j9Mtp+naxhCUhKBRTak6xGFnIlJGf3DWbW4r64wCFd8OD3aYcbYkjkWyDJRxrCIyeC9Mc/YITHCU0q0gOSqOIoN8mEMkDTXd5MWZrTQUG5gS7mAgbIOd3jFEPZ/gNF0ia1xGHd3gxey2dje68Dxjo8ugDMsCUFiZ/gJdnKLLnYP7m6BrR6+u2Bic3W2AMGnWa6KqpN3sZSlPD0AdXWicDFPqR0Dcowlr7otMhUqlmgMWBSddCTsXlmSBbrMwjW4Or/lNrJKEkymJChdDWGbuRAzieQPaUBCeGIQogjfWupMuupYgvoAcTJKOmyGVUqTW9IyMrkdY1EM/2anDNzroNlP8m6bLNFCtVUVtl8dqEfoPp/shohSu0mlRougTYnTt80w8GUhEBjNfZii8DsFyrWP0RFSyAWVYlZhrYyPNmLKn73R52LG1/itxKW4HAVhLKnMaECLkq5MdvTRDlEiMksDV9aWzSGYx2ZfacQKZ+0FmpzxkxcRWlJrK2XARB4b0pftwumlFuuypyB4J5erPZsqRmNMiJJ5lGEojjljSFipAkQmf2xh62hK4mlG1KUc4BLFFFMgn6CqpkXZFGiBLG9LR4IRg2eSnWGoosqGxLbWzafdSOGPblJVZRDJaY/Y1l1oW6fpzM1U4lxpd6XOEBojIW2doDi0yEZgLRWMQeHFySm7SVQAtk12hPV1XsOrB17YU5iE3IYdnFpuUFioekOvktHF1Jx79lNXCFd2UfTDJihFAIaLBWu/aGHP1W6gjHYZ0Sh2pSQgx8pPZu8fMnReIk5unDGZ2nE3uZ4bn08oIe+TsedSBKzoOCZtpqd4mZF1HBYRWq2Hk3ANuIdzMBGrHP75i/ZSJTJs6RMzyN7JUI1O3Rldt7KHUqkS3PVqcclY73G3KnqDTpuzkmqGz834KbNwG76SNLgjs/cbtUxxGWhHKYmHKwaC2gH5yC1AG5wkMrlUtxa/0bYZiyj5wRU4ZkTZk7McU4aUOFvZTlpzyAn8FO3/4lAmUqdTO08ypgE5NOVnIcUGQDt0SWS6GEuLwqfBpulZ9KaHLfGzGXTnyBXDhqMeLCHIN0mW/c8rJ1YukQRiGYaA2ny42oerqZPGxHo6zh1C3A7lI0Tslz99CQfkqKTQOfhc6yLRZqGkyRXYnVFNCnvDYliOHRL52xNQ2LIKzHqUwvM5crF14j5k60mLy03ysbFFpu9WMGzISo2bL048OZo7TP408W6+Pb4p+kXQ0j6DQaNLOuWL3Y8rW3GMxRaH9hyKnx+F04YXV7jcaqopqG00RPk0A5Z8J0lk2IOp5EF0LSHULx9CsvKOSZ+MfstE1NjrfSa/ErWvwl2oLnSReQI7QjoIivMBMJfvC3410hiEo2ht31YLdaitsLYIuE8Vqk4/SIsBZcmANSagXudOkY81CTfQTOTVl0yyZoqJhxBRg4ugj07Fr2zx1bGSt8DyqGauaICRxFDpkIprtvoLTY2M8C8B970XS0lgQnbm2874PId022kCA3/SxSBoKUpDH2unDSFunCFWA1J7Wx4fRk1ZprkS45xbVpTgzc7FqnwhNfAdpGyJ57wi12iSpH0ZaEfUByWmqUlV5gXTT1AjSFHCwJw0oE1VRy75QbaTp7rNGOnSINe5njdDUybh3JS0QU9GmrJk5r/8g3EHa9xpNkB2tJGmYtvLPSPb+XCRtDA7PffASaMP2CQJi0BxnK0EagLoMJz6JPSbH9+rqlbrtPk2aPQ7BXj7R48o7SCMl3AtpkjQwKJRTk6QGsEwdVU8N1yo1QvDsYpC9SLIExCjlwVVxx93a7OSRY1t6EE6TbkpGi2elW3myGHprEmhm7a6kiYqXj7lo2AKl1MRaNO5PFivGHZ42qJGw4T31MxsWgHqXjg8l7Twae5OkFRpZBC78jgmTdGqCZNXIIgzSQHQ304704M6yHWhnkYMvqbEWMzfBA1EXnU7S+Urb0pHmDEao4V5z3f2vKqRamxVN1kvSajzgsy6IP4Vxcn0AthWnkqS7vVY/y1wgrZEi6rbR9o+ZrIYWrOFCAUmaDWJDwYQaaM5CuuxZr82vtolJXDPwoaTjBN637a6kPV5gpKlGDnaQJnd5OgG+K+mBsrgp7MmCYo9mPyBwyXiMBcoF1HgwNY2RzHN+DTRrkw8c50zZkQ6ZIu9OgPB4DUE0a+zt7DRHmHYiJCQSgxoPtY2neMF1pbSJh9U29lLVblpJOoDAh5DW2LxUWAaDLfTFH0E6pdnGGb5U7B+ql7wiR8jN8jvphTU3TRj4qBRvaHyadFtzAgLra3M9VLWqJCljRe5H6zue90y5+dU48C/D0IdV71zznBNzUYDzAMxJ+/r0K9Xr4n7Nc0hVqqoWWIIUddh1gc+pzaNwWUhwkEg2iw8hx2FmRxdfzjadubIuP6aX+izAbiL2cC4DUW3JueJYMD/UTdwXxic+BGg2Xa0dx6h2kdVk+nhUPAfdK4GyHc016ySdYouDPR9qK4yMVgaxJerhK8SFQ73I+HedSx1DYjFxhYhEpZnfdpht325tMS2dtUNo3cT5rhMHEuUbaTf3p8MCL
R8u8Y3VdSW27554auL9mvdbGdTTCdSYeNf43x+ktuaxXf7F0GhuDul7TLytOZJ17zpxcn7MvD97001qJ9iqHzlxNLFZnHi6JJQT7/mNuhYRge41+8l4aE1hNytFEaOxaESPQakvgOjSEhzjn/bGPT7mfn98T4O0NaNDgA5fZTNKHUDJ5/gaNn5Dsjqc2xcnjmipY5/2GYVoZScEWzfjBpclNQTln9ZsTNFhJ/ba6BAhR+Z+DV8v4dRtmb0lXdKMz1m9niupd1EWPx3N6KVFG7xZj8TFcPDzJA+NpneKRQEzl0w1AYWN08/byBSkapWUefbAcpt47IpUraRUuJgzYBAqmNDNeYPlZqDivp9eNkrhYAvXC44XrEaMgrcJzHztm988fPFFJrr9wvOkmKuc+cxnz3/uc/d+8YsI3ulMOVx7wo9qqZu4OvTtN90aLBNUvSlnl4tB0Vo9jRIpSRUlzRTC3skM2tG7C8mEvpx5IG5oLAKKikU4o5GPaTVbEQ0EiLRWpXbzUxqP0A+51WjUI2rpBXi+zxamQtv0UBimav0HJUsmUUEBj1q5y1w35Z2OhOZqdLLW5KJRX9D4yCQTBb3+xusRVGkKUURaNl+nYChPz0J5ZYzH+ClkHoWLqpV61Vpr31PaJm9853v8/VFvCWG/SeqWK/o/Z+qpzj6aeuOwD6H+nf/sr1z9mf/i4urqdBhCpTr7zebzw9nsU3/2z178wrMEsgzBU9Sr1M6juQt1Dkb/kLmTBT6+99w76knro6nnr4EtTlOnOB393lz3T4y6bd84jqH4KP1COxc2yHgn1/0jU6dXX3vVmiNR7o3WbEvbPkqhF82bIu+TJn2vA7hBZmYubJUD2XnMcwg7ZyYm1mIksL7ENdMDmjlN6s4TwDiOd1JXgJlLYVHRqragp6jnchDQR2gAJF5o1BdZPJVprbV2N1doDJgt7d5EsYr0YHiROuB7nJUxApUqZgPvSp2izEtExjouUtdX/sp/dv2v//WLm5u2XD1/GGuo6tb+/n3/0r/0xJ/4E4BmgCoWUG3Ad6EOoCsyU1Xj0X7xNT7AVjWdJZB3nzuozzdzpVDT9PWMh+CrZLyxjip3ULcS3JAiC6QjEyeCaHJfz/Z3Mt6dbG+Mp0C9G+P9Y7L9XRjvDra3IY21ip14AwAGh8LsubUZd3U84XE4T66yN6s14DJb39wATWAvtaJay+7sTlhOsZS7UZ7rE4X4qu1UEem4NjAJ6wUlGfMLwBHMpFZAH/f92ToSUaFWENOo+7VS+d2cukMcP/azGGOsj3OMZwX2XhCgqKo14kZSq7mUwzAE73YnZFEEi+4acAI00gzZ8E6W+MGNJDRvRWjkb3772+/+zM9c3NiYlJILOD75pNY6uXIlV35taenKX/2r+yLP/sk/qaoQKC/47dS6n3lTBKDltBh1Pb33LXQ05pliV6FhgmcWAxFgyy5T4SWaSxFGyL7gefI+W9tWDXehP5+3Z49jBaqT70piGnrS7n81LgTtpCvyVcK/cE3rbotBaNeG0pSNLddobO9vBUiJvmuUHw53tlK16WuXL2BMHgtiroq7085i6tE4m8VQazWNHiyEcEUicqeQ9JkBdClmJqG2vAvy6T5D17pZFRZR7KWz+7GboO+gwb+Wp6bhbecWA4IWxKaIoOQrpdvxQvSyPEW/omY2lKb+s1NZ55ROUcRYYnjRLhDUFGy8koNFKhGjW+RgGQBkW3DnmIU6fzJc7ORmtuZ1MaB3v/GNAr+pNK81KVeuqMaVS9aEDShEo9RxHBFsu+BMBkcyUa0VdWFUOf3QqhIb0uG8O6YPgFWTK3L6pvFHqnxq923RyNOqGnnHkMbceVymEQRdoK7wC25NifOpNM+PZD+bi9YWjjrFfr6ewX6n506NZ3yzumINDfXt7Ee1/6p9haz+oQt3u9S15XbpGyjjbIa70CJv2g61NdWDesQI8LJJ0saK3Vp09Shp3yytLwISGlZwAV218aRl9tOkxsWpLCzPqJcHwwyudzo/zWWe26r0A0D8y1gsIoVyxCnymdYDxScVZj85ss03qyZtAInKcoPT0qMbQM9M3rUw1t0WX1JXkYXlbABHwJHIysnJCfPq2lopRYP54UhD5ycnR0dHxyLLoFLKnQPgRTG8ywBsE3KOHEVSbf0degLejxRdtQ1RWw0X1Fz/HEDH3JoFev5xk9XwJmIAiwxgToGqQkrpGcAGHaO7gwHuOoDGAGnMJJ1YhrF/z4GuDTUiGm0A6hkgi/2DsfC6YwDoB2BsKL5uzAzwsLS01JjDlz9zX9R9rfa0+EcBipspUwBshTQyN9JnCwuQS2kh+8JRXWCYMAbQOo/aANxWBP5Mn4fSgW0D4Dj8rCp2t23EURCMF00LVOnUAFSZP3QArkEpyidVmxfnOmdhAMa8dx0ADL595AD8LLYfALuuitSyhQHMCW+dnAxEODrScZwuLU0mkzIMw2Qyn89nJyfHJydHs9nOfP72bPbZL36RLMOhkDXi+OgBzMeRcgDSUAAFBCVzVimGqVajAqmVk6Pu2ALYCvD3ywPfawAunYa6owTcGa98rwGckoI7B2AkWNmsbuLQlJowRz4AlQWHoAXhfQAOX76fAVSpdawm3EqaAyBgCMXhqRKRcemuZq01rXFYQhDcOeTINU0LDI0uUCJk3ZgsrNG3OIF79lHXCtgpFAiElmhlxzBdBXa4T6ZHvQfVnQMQreazcGxwHw+0qXApqlpiLxHHYMDCAHSxBByh+5ED6KaW45DWhjtQiR+3UNgVH0DDrta+mDoWiLHKYqyozyRxOeNiw94TqZ/+9NXnn394MiHAClNnJycgOpzPd2vdUX13NuPJpOIuA+Dw+cwAaMKuCD77GE4NgJjb+VDuRHNzGte2qnIlBZUivkREgFJc9Aofgv2fdmvuf+/ME8cGn2IDBgkMmjaAwMRUYrNUfQA5YQoBR3tRlAlRYg1qc0yFcScfFlClSswUUQOkKx7g0eQ8Cd1lAOp4gYmFM3xAWZ8K0BCtbdiT3Y33/VC9BeUW0BMTE5tG0UbLtbWqShWNlJ4I5SM8aPfTrDI17sJbCEh6YYp7N27vM7cQofO4lFOOjYinIFpMj+NTphzygw5smAH8nb/y/8oV7CUHAT5PoZw+xuE/LXhWsbOqyXnkctw+qKqxco62Gzv0r4799G6QKweswIu//PdF9ZWvf32N+fLu7sXl5apq590VOFqfluPjG7PZoch8ZXjj5d94/cVfD370nTHmcqKB4tzUxbg0vEENtULA1euffOSe73aoM2aDZJBWc7W4khocG/lYuQKhAEKG/ff4lqNWhf7+f/l/l4db6TVorr8hQrtIPY5V3WdXzxsRMEeojAipaCJbG/CIQyolZ+VYL7f9bam8+s/fr1IZFFfHJycQebMWrcwFCKuuzJw3GeWO5wBMyEGehjFAgWi1r4v8StlIHUixsdYEiBNze69GGp396wl0paii1ppBFHTPtwOWJKzhmGS0BqHfotrLVEfXNVrcPhOTIBophtlU0QplLrVK633gxJQAiLxz9bX3Ln83
nKem1xsfBpuF69oBpfjXWDpvXiFL8A65iBRVTyH104LeXQrhbF5UMr4LUhtYy28JpvcvEkapCjBQgZnqjePji6urs3E8ns9H1Xp8XIERENWzT9z/9qsvL8425+ueUEaTGuDp5dG/Ilevfe6v/dL/Y2UFF9ae+9/+nv9jsnMsDnWP6GQz/f/2Zqe7NPgkYzDUsAFcPbZRPPcrf+fZ3/1j3fbELU0R3RRRC003ixnfpRDj0X4FVFGlcil5WXKMk0opVQTZrNf22kt3BGPEa9mTLiTaYhCoVqlVoHGmFWPwkYjU0D4ALO8lYUU6CxbXkAyslEIGoYM5pEWqVIkYUZieds+Ha95KPD1b35nay5ObsOhArRQneM2ax7AQBVqnjDnaxttaxjmkp3N5X54wzqGutGkQ4jZ/qBrSS7sqqi/9g18YhomnGS38z/msl+p4jq+0xieZCKC+JrN/QG+s2ityie2BXfkrnZarxW+FWmj+RGq7s/dfuvHGtRBnANg6PKRTjAIsXTizfv89PtZOrrR/VgLZ0K9NXtIWQonp11/+X62sYHUV17af/corf/RHP/9z8eUkHKoviHRvttedWKeJ6+k4z8LXFBiGwViLYO0NMhm0PUXjNjm4hGt/pc6dDAmzOp0jYwzJRFYdYeOlLu5SxanmQSM6/G/fGOs4IC70tUxpyjKI02nLGoev9pK8MJSZAwdBdRBVFgmhjoVUqDXYYDYDaHG9KrVthFo3HcQQAbshToSZh8Gb9RATiC2qgTSP4YIHvmdIy1lHhxiJvG2dmWuRuJk49XQ3htwqVSWmofj15cQMioLybgyH27c2Ns+BFvyXpN9YqRcVQZaKYIGv8/Od6nbz7f+0T4tC+zsTCD3AXCgBXhyPr2eCCYUfRqrszXopydZpvtFkGcl66fOfXNnYoFywNouUFI1/HRLlcIjo5vZem0nVq++vLy/j5ATjKJ987J219Y3FMQc89nUMrdGthWOVhUPM00vatJA9LFJoCQDT5W9/5TO/6/f5N52cxgYp4uYtkLMTAVxK44eYvEatDwHKLNY3O8yAfV0bFoy7qaRdld0WkXwMLrre4kdU1E7dCV6EvKCcQoxtpQwze/Qh/DDNJF97uMigqtVzOFVhDkCk3Rq8ZipoO566qoEagrvMKuJiTxG6Yv9LLlRsTLsaw6QurL2PlnMMWBhD4rs7xxBBvCiTssZDnl63mJ4IALfee7OqUCaWn7KRiI4iqa2MZXhx5yMKd5rp4pWfbV6EYUvv7W7rIag+grzIPKGj5+H73yx02cu4Rwsse85KZtvxazzNDOfGdHpheXrQ0p1S3SwMnJvUIXtn2BuXzm7e2N7JT/4vnvmvf/3bS8fHF/Dln33sgTekd5ebC6L5GzJUjADpWPyK9h91rosuxhKGsPuKAMArX/2VT/2OH+2ZGxpHuERsaXmqVapdMZB72HAPE2Sh27bxJOCN1JBsmSvXKmzQ2DKWljI4QqREZSg6V1EZx3GyNCHLcm2GH4hMTI6vm0TW8HtD3y6MAUyD2fEo1ojRu86KQ9kwhr7i7DZag7ltHFLjUhnKhimhKTRqx2O4OfGW60nO2pSOvob/2LGZDUe8XZ60TXfhdeQbQCMWJ+qac81e+LWfPz7Y68xgvu6GYiml87SQNklxe7vIjkbNDtUY8BLAQIe9yQnSDYE2B3GBx5sjElFGAp8cHdfQhtmBqReOE9Wnz57dPz46LmOYlYWBu5i7ysyd8hrShEfrSwXQmzt7AL781K/Vz/76V//9X1i7f+3Pf3v5T/7Hvyvm0/i4nwhOTXcRIdOd/0enh+l/WtTGdZwXLuIoMMIRwS2mxlVVqlSqKMVO8kGh/KSRcmycL3wEZ+YnfNmYoKSC4MwQYSYSpVKKYcM6VhRAUQoX7wnvfUnSpOXe+cK3wfggAoDS4AccHm+wibsUMLUnEdl00zclhWRuhSk9t3vFjz1VtGWZLAIjjs6PzBytnZooKOJa99zNBkxSSG0UrHHihygDYm9z6Zqh1g6cU4gVcO3Kq1iwA755GrH7YJb0Fk+ZhrvYW/iyBvvF4N198ctnugd2fS06PZB7BUSyZwwsPpxc4zauSh0t5iwLnO4ESXV6fuOBtbXviEgVaH6K1Fm+t32+ukyUgz71Oru6bGO+9vu+Mj03XcHK+5/a+/uff+lHv/ZUjqwbAwVTIQKBYRjcbmUYD93anQYRaLqsXyI890s/98yP/MFU9yAg5mScP5kM8/kIBDmDMWmvbJ1j9+ijmVOVQNkcJAdJOUTDwFyMmhWC51mmlQ84Mq1RdsbtiFuDLWxsfdCrl1MCKUihg6rY6G0cuZcE64nj2dCwEKizf2aMEhNrBP1cLKOZsDn4Zj2riiUnqvsVcVEaYL4Z+YEBxTDEstUVUM/ogh3KpQPfD6NQa4bOkYuQysW8cQtFEFEhuvbm66Fe/bLfxvSnTQZ6zo73E9ci8GnORhe8YxhFpEpoOwRr42hvJcTXYBJEg7T0NBp2Jg4l4Myv1m1t4baifvzn1s/fe56OgjFSCRhaQf7YzfYOj/Tm7gGAi2fWYjEA4KreWt5erudr3alX+XbweUMMGuHkbLhphyW5RtRVTYWlWMDdGjqxi0UHx8Zjvv0r/93nfuTH0N0H4FMrLKoQOxNx++QGExFAJTu6B1rGXldH6D5mClGYAFHEhQFpkEBE4ikiyVXGgCoSJ6+qqhHoJwCsTERKKtDSaVIT+lzHKm0Y9hHygn7A+6AE5gkhJgJENMF+IdYe2phdibtcTWnZzV3ZezGubvEguBn0/LZIXJTj5ekSetRIx9Xspk0j1cDb3C4OI7ZBwwNnVR+GBxtsGMwKvPQbv0ilReDzUb3Kz/9Xl9e2lIuf8SGohZRIAEuXC45Lg06x8Ako2tMWmhPHvVHmmcA1NEBod4U5dR4MlHOZ2BtToiVmOr+B23sE7/h5cHbtzPbBCS2FFrLbDBedq26OuardOSJKGQBsHZzYuC9srgP4Mz+D/3jj+PieY1zDH/3lS7QxMQCexj9Ruaa33nWnsLXhzk6gwxjOjUgYGEmInYK09LRrb15+JpvyLvJGIapQVvae71516P1l6x280b80sxJCwUT2eXq9CqCKMBEQR729q5wsWkqVysoSF5v4HXfMihZhjWEsbPJHDGPoot4NqHj425u8EcSSfqLoO7RIMmLWOHIpfmNlgo7ItSB36/0qDe0qMLAQfHe1p7F3dtwNArW75tsWZpSoMEuN23Gi9aTmMLQppQ+uvn79rcuB5NAJWcNBbQvNqgefa9iDHiXCM7KBQqRZL+1YbPF5IFgvkAQQzfYnbgnw1KwOumZr1lDAv+Kris17z92IGJiq0u09Dtrl4ubSPWfr1etC4O3DcuZi5zIgVEVcvxZskzjXlodB95zbvLWzj9BWt3YPofp/+Dv4I68wVic/8p35X/5D97y/6d2tWt6Kwz4CKRgFxFqUfPaFmTJnMBeCvP2YZ3ci9avBEl9FC6zYjK+/fbnWWkqenfrKmvvCxDwQxmgOU2jw2zxARCgl2YMXOsmownobah90CFDi/GT
SIHFvEi12kIwtU7V7dic8jqNlGnHhPM1y1d2GwXcMg+DR4hb70DwHTn3pj0t4A9dYlhkrVUSF8+QGIKJa3TXmaLsUOM3Ewe/mlAjoWzwdBPJzMNW21zEMmOOG7o3mSIjIWCtFa4sYRjWdYtmF1L2a5EBVlZmHofT8EpQb97YodOo0a1nYVAJ1XxQXggR21v04S201C1BcRIjC147yjsXgDiIW1BRFUo4BGWBZWLk7X9fm8/uB/Rs7fO02PvYxdBgsDN1CpXQsN2mEY/r3L549A9DNnV3y+0b15z/9+I+9dHkDJ18Hrj9wqZiaM8SE0EhwfaCwS7pzVotatCkNm7gWCrBAsKPM3DC78EI7k/nSb/y9z/3w7yfvJVqriFva3E67xNPqfv0ekp5RA3tGJqL/v6UJMrvY9CeCLi18SlnBJYlUZexKzez/mLiiIjJGwno4j6Nj1HCPLD1YmYmKNYhNH8EEOHCFL2TnReQap4dZ7HIOCkA0esZlKWUIddIN1zdDY4HMO62kxraW/Ciq6Y60tOHwCdOJQsTRVDUq5jTDEhJ9mPoUlg4JxEvxwq/9/AII637uZ07S83SHZvovBio2QpzMtfiKIWj3KCsm4fZgCRTuUeQGb30hF/iAwiP2KGZq3f4U+cY4EnCrVjCLGhMwDwOgdzttzkVIy0PZDa7DArh49gyAmzt7UH3ls0/tPv4gv3fz+EtPd1NMFRXTNxHyWzCkweLTtDVay+dMEaJf0z1UBerpb19/6zXV35cJD3Fa79FQUU8c1I637U/c84gvPROUCyuHAwwIkfpdKWEYmArxKV5FWCliUnjve/tMhCJDEYtKrWSZ5KbPOdSZgx9jlOLtoIKf2wkQYTD0wmn0FjYy8advQ9MNvtJ+Y23oGx9/Nnlw7lTA8GQEMLwppie8xcM7L8wHapBbvZVmzxxosCYEWD1VjSz7MhxKT9KJkXzw1hvvv/na4m51a5+sYy5NGCvN0EeyXYhkXNsaRLR7cG/CCQSLo9g6aRVFDTBsmpyILHqieZTrKDbgVO5MjjZse5dxYX/eivL35FRcOCNVxvncBSqgRDDEgrXVfNYpryImdW592RbjaGMV910kibLxWM0wC+QPVEH1dZS22k0rxY4tCmYXEbTVldyMgLD2hWtvvnHtrTfue/iJHnmFuGtE5310LTFLzT9stth3367pjHAUAZaZ2C46D77oGNIAZSR2iHaPhFJ6OAhw4hGiHAmD2YW4QZMUnNxhc81svR1Ciwo5RFFnmRDp2I4FgfFJunkXb6rgLw/5dr2dXBrgdj8wUuxaszOAVMstbyPh7nohDWDpACNHAiButdJoR+RqBflJAvCtX/rvJLvGmYJYVORmOjTmjMbHOUZt7+SHNA1XUwoLoo5+AwFYxUwCNwBSFYhctwQdi8zc7HD/pj06ddCJ6nHnlajozrXbq4CCNqaTnbgRWl97dyDCrb3ChFu79nmrYRJguHCm5hOCViGyVjF7tZ6JTd+p9cJkiGi4AR/yG9xjwgqUC2fsF+oTz7poivgpg5bzG8bgChXgWGSDi3X5OhxHPd/yvYIZ/fXB1dfuf+RJND5BwmGEQtEq1lKmAKWUU9VvybHoFEr8Y02tLI84xVUhAldTHghCL9WhavqRMLEwtJoz2Y3kdPvbUyOJrXe1KKo6JL4JV2ARKhpnqapDAl8IVa1SRYRApdiJdG6KY4Xswdt7tOFjtEgiMy+waXNbST9sJAkmTVNEhUAppZTSxQD9P/YJhDq0IwjVrsKh/ym0emd1u5WMhUw0rKp2Bo4A1ItMFZ9rLXsjbtce3umQ9K/7RYlHhuZwKGS/91hYgdGKkDo5P3z4M4fz+eSVbx499Ombn/z8u7e3Hv7K3yivvzchWi5lOgzDMPD6ehUZa7UDFQLK/snAXEwXMjPRUApzseb215f14hzORKRvT+jjlQ9PTpaGgZmqiNkNAKNHaFG2ZwHBksm8VaVJVL6pt48H5tk4mr0V1VH1ZByXmeeqq9Pp0epkVsejcxv13Nrh2XVEXPH5X/35z/3IH7TnJNPmYiY/cEQXu51y+sadxioZBUyrtdDuNxe4MQUlYxjQyq+3zIVFtsAC1R63mtWVBL7ULY9NzR44lEyctOK+7BWm1oSdEOOGWuDb8ar960bfvh9JVe6LEls40icc8MpYhONuONO/lDXfsdK1du144rwuF9yemdWLlqQ9lMFHW0osBPqHf/DmG9ffeoMyHb3Weip+lksb+CXW3CtLBIteLnX/C1YI8YqZdK2b/NmUmMItuAk0AKD2GIeoZMc774KkaKihXSG3CBHufBHWJ5Oj46PpN/7Bg//g7y4zT0pZLmU6mawuL0+XlgCM8/nJbDafzWwErDqoToCB2e43GqzMeDJhov39/bMbG8kb792+vbK5OWdeJWLmmlAVmCR0ms9VhO0rUSGj8zkAiAQg9aUa7TovxHmk6sYweLCmlOlxPRnrZPcGv3kDx8crFzcPHr/v6Ow6Ed6/8uq9jzzpbTEA42fzEz2Tv938RgWoUVRAAFl+ZMmgIzJQCoLURb71W+DgDQlic8SUUc+3oSyQ5UDZNU1ESAoKiBSVrXtwhPUKFRtJiHHjMQ0RGHLziaiAJW649VBQtGhzaAzv0iQidaxEREO82Yo8NNEsHDo3NmouYmeQKSaW0JGIh+KnCBYET9c7ZIBSRrJ6geJgXdTuT8vCQ/cnv/3LP6da7dqRGgncZhttfmwL7mjQIFKyIrofanoYveT4IWAcWKtjp17EUtcjmFygofPgG9HtlqSFNXZejBmTrybl8d7dX8MXftdbz3/1kcnShde/MSMaiEwyl4ZhMgx+CY4qWfa8ePK6ioyqqH46V2czIjoxkHJmZW97279FdOHc+sH2LqmeRDsujf3STkVqLmDUYzfWML1YitWQM/PyykqtdbqyMs7nK8vLJ7PZ3t7eyfExRJaWly9duKDAzs4OqR7e3lu/vbdybv32I/dcf/P1+x97yliX8kyIXDiFhEBShZmVxA7JRYSY/QjKm+aAiJTA7UZpAhunSW8Moa3Dgc2xDAMigk/pImrfp5JKKRaEE4iZHwvfynwOImZv39FY13qMxKuAiT3rYdBOT6h1wVY1udOGMRQKhp8vm9EjsovnOI4zzY2rVNgOxRKbk8e31O5B7q1o/ise9G+NowyxOI+ntMdg0tYYo9gJE3LDvBuBWvSfopD6+ttXfK4MluaRUooB0hhaObah7QZovQeSoYkokaDoP65VRaUlH7ZB9kK4CJAbNkZEWaj7bhNtH53i1u5BwIteOdKHx5UBYK669so3b+3srE8m/dkGgFrrWOvJfD6fz/vWf1WVVCs5iMI4Jgocx3E+myHEcufa4fJkAiKpFUQarXwXdjm+S/mWSy0bL02m02EyKczrGxvz+dzU+mQy4WFYWV6utb711lt7x8fz+fxIVSeTtZWVtbW1cT6vtY6q89t757YPLi/9N5/54T/AXcREREg9w7exCgD4fTfOadafJAr4epBsu20VrJYkmJ8xGIhoiJ3gioEaQaBc6je/+c2bzz23xH
z9uecGwvvf/NZANGEugKief+aZs888o8A9zz5r63MMXYpk9Ep46AtfhEuBCkShWjVa6gDaNfvV1KBR12EOcJqj1G026GBFFQBjhR1CqJhuMs/DZbfXi7mvRCrigcHvNRgAhXuLhezLYdisDUYsJqREUKbrV16fz2exJQ3lnnKQWvs/F1xqEreA4eFIAFBQFUXUlbV99ycjF838pBTXLra9YKkcut8J6014aiW0z+bH+ixoJIrL/QXeeO+9jeXlo3FMWCsGSIjm4zivdS4SaYAePByBAWCAiZZXVlR1nM+ZaBiGYTIxwVPVk1qXlpcBWMo+NM4DoqesqJqNM/Euw8DM1o5zaXmZAOvgZbBza2dnPo5QPT45YebpdLo0nYLog52dW8fHRDQRWVU9mc+plN1xFEBUC9FRrcPXX/v2r/x3z/zIjwVzQtW1JBe2lt2TyWQcx1or230OsRpVNZwTFZXWOLrZV7Iy297FNQY2N56JtLWwVRWxwPXX/tJfuvHcc0cvvbQyDMvDsFRKnc3um05t302JjC++eOPFFwHc/JmfUeDmOJ5lfms+v28ymRKp6vOxxQp84k/+qzef+gRUB2PKGh2MgSamhlNt9lU8UCSqdRxdbsOmLzAsQudou19H7XKIxJxE3LX5yup8ENRCGl1SawwGGSfPwaScdyt5hxtoYqP4+i/+7XE+ZmSxP/HQ7owIIW/pWWzvH53bWO0fzM2cBNi8G3hdSI9EL7ftm3e8r3HoGFGRHh+YFe7T1jor3BJf7niVb/0GgP1aZ4eH02xp4i5EQ7bVRBqAKouU0LCTUpam0+nq6tJkMjs+no/j6vr62WFlZWXFxvfWO+/c/+CD7rkNw+HBQWEeax0mE/b6W+dy6hYZ0axTaj2ezeYHB6I6n89P5vPDkxNveFxrKaUsLSnz1Z2dAkyITlRXDw/f/+wPf/LaK/deuDDb2zuZza4vl2H74GA+37h6Q3842CYAYJVaRdJW2ctI8F3YhmBmObq/qSoTwKToMgt7Hs6uPPB1tWm++Y1vfv0v/+X9559fK2V9Oj23vn5hY0Pm89lstr+3ZwMw8hkzs3/Xl5YAnBmGw1rv+/zn955/HjF4AL/2n/zFl4+P/9b29jA7mWkYettSGwI1MQxhtoF63gVKYdcwzZYgv+Vik3hYw8QEYldgHEdEchgURBjdaQxgY2gl7Da1Y2oNP1rzJE2jh1cfPO67sX3w1uVsv4a+J4YJc8v6bY4vgK29IxBt7R8BOLexCsRVerHaRHa6ad67v8UNOfZoGareoBZ2MUL3mV76qIPHDj4CxgLR7D9MbH4AuV89GwL6zX9QRJqj1JUNG08PpYzMdromqmMkqDLRoCqqk8lkZWNjbXV1ZTqdXLw4XVq6Meil6qZGaj1/cLC5uSmqx8fHY63rGxsiwuPIzHND5nEZgAk2Ec3nc/WAhDe+nI3jfD4XkbnFw2NDuFZSfffggFSPVT9x3323b90SkQcvfxMrK5jPT8ZxXuva3vyEaBn4tZ/+6Qd/4n8Tbo5mWj4yIAS/wsZWqXXnuIONKyo6CW9gUL1gaGzBEW/+T9U+BlV961vf+q/+zX/zTCnLRFVVaj08PFxmnk4mS0tLS5PJiQiia3QJ5yLFWIFr8/naZz97+ZvfvH8Ygi3oq/v7P7C+/urJCayxO6KpTeBrO+nyLqoEa7oHgMZxVBEmsjROVVQVxL1SzvruA1JhthrllOHgKtWFExpnaLsdphN+pPRqoFofT9SKzOdzMrDHDNBCQZLxYgDC2++/ffb8BWRYuXNB3bdJMVjYSdKyhxBmUb24uUYgWKO8flYNqLYpxbsN1YfL3buuZgO4HSUpcKqqHgvCPZYp4sAwMQqAndOg21/c6ZuZ6iS9mIj0lFImw7C6vLx3cGATG1VH1ao6EE1VeRwt5jGKYByryPESKy/t7u8bT93c379x69bq2tre/j4TnUR1zuHx8cnxsfnVNZQNEVWR2Tiy/TCfi8hsHO0DNQRLYvAicv3o6EhkmYiJ3r5xY62Uk3G0ryxNp5Pj43EcrWe9iAxEr339H3z8i78r7aypSGptoQhAcia1i0daCKZKdexNxMwlAjTJDAbNpbVOzyidYWcB8Ms//dPFztWJRtWjcVyZTI7HcW19fToMUqsh+WoyDJSweaR6KHLx2WcffPnld1988eEvfenwuecyHPqFtbX3x/FfPHv2Z7e2BmsplPHhlB97h6jd7elpEnZrlhVnqXrTmuw+FZzbcyhUObLMbbqjVDtPzAwqCyRwAydQhdvLxfFEhSJJrRQYrJRiOV4xHPZO7IFkrnzrN/xAhpLt5cb2nm1DBNwIwKWz637rooH+EERVXDp/5ub2HoEunt1AOqshhSFq2skv4JeYdxk5qq3o3PhAJctGouCKg4c1emzHuhJkHAP7IzQCAFq9sKnb+7nwTbcAAM5GF1GzvYUZIpXIGgAXZh2GoZR5raTKgABzkTkwEsl8fnN7+/joSKIv0uzc+v7uUY3o4/jwp99/+2UiGsdxsrQ0jqO5QtYoP/Qv/PARqCI1GhtZcFZDRQogwBjidGMcB6LdcQTRdBiM+lyk1Hp8crI6nZo1Ozk5MQGYBOqeTCbJPBS3IoAzuqDjfMz0ASPNwT3UmsPdwcyGsDiYmVBFGMTsnZg1UOQbX//6W889t848AqMqAY/ee+9bH3ww1lpKObuxMV1e3hDZ2dkxz0XCzhUiHgZ96qmjN95YXVv74ODgK6++Odnd/aG1tRSFs5PJca1/9NKlIRSP6afWHqOP3dhURrvxzRrVEDF7e0ovMAhoYVFyittQu9ReIvaLv1hYRed2SZSLR4JBqAo7cPmQ8UAtTdzH494Iq3jpCRFBYYqfiMpQrr9z2VBmxK/05vZu0AzGJiLQB7d2Lmyum4K9ub3X4lJEN7f3bR43dw4unttImG3IOPa3WVqASElIW3wgbWYnXQ7g+8S1BfPraZVWaN1dH4SMuYvC8spLF1PsdbICuyKbMTJ73w4alpaWfBPDHzPwbO7xTHWmqsDO4eF8NkP4LOMSnewfZsv73eG923t7J8AykR4eSkTsQgm1LBqJ64WkA/OUA1M16RWAgQ/m84HoQGRUXbl09vDW7grRCAyq81r3jo6mk8ny0tIwDKUUqpVUB6Jl5q//pZ9+6ks/3BI5iTJFEYmEAZMlVeVShkCGhADMrjc9Dtf4GRGSJOLCwzCoN1RG9bJiEOHVb35DVOeqBZgAT1269NbNmzPVg3Hkg4OhlDMbGzQMS9OpHB9XkWPVOdES0XIpSysr9127tnHxIpfCt28fjvVkHC/PZp8JGa6qJ8CPra+bBUVeVHeqBUHoIalV6jhmWo57BUM2/oEhbVUtFrgK3mEmjTi2p3yIJ08henY1S0IE6zrDXtV7Z0uEKKiynrJA9JRmEKzEN1yd7N5+4+0r3qE+hOLm9l7ITqBnRSZL3djaBXBxcyM8pwVjZgO5fmsbwKWzGx647j7RAS33CLLLUIqThu+HgHDt601225m+OVkECPTc+rKV9SFFVFWBIbqWt7hWfMYWbbaw4AAwP
78xlALg8MzKcHMm4dqRdb1XFWCuKiKT+XysdYjK+8NxrLOZ7T4B9N4bx6XMVaVrxUYRl0+84CJqxjb87QJ4V1dVjVRNAW7M50w0U52JCFCvb60yVyKT8EG1MJdSqqpl4BVmO/eaEFXRV772S5/+od/7oSxk3pSFThSsbL1kUgc7zEwg4yDVt9EgmnuDFE6z4XP10PcrX//GiWpRnRCdiHxwcLB7crJEBGAyjntHR0tLS9OlpenysoUDWOTYIuG1LolMhwFPPvl3f+7nZsC7Fy98jN48v7n58MWLwzCMEfeZTCYDB2G/TaeDdgZ1zFyI2HEghqF4O8xOVkzE46YGijKoEGxX7GQ9JW0Ls73tlStXLFHb7IgBmDzXtWEkhA6HWKVKrZUIpQyTySTNZAy/byZN3/mV/9aGRPDqRmgXx9V8egqzAnr99laHezM45yRM2G9s7V44s977qIvVreo9IuzWvkTB8dgW/6RW+I4k2QcKPIDvtbGXzm/e3N4z0c/NorByp14KLJfC0Vo0V4oBubV7a2W+srIy2RlPVAeiA1UmOj63jps7TESqc9VRdYlIgSGeP1eciNQw+FX1SATA3KCIaiZjtTWOfy3Q7cbWjl5ENK+8AQS4ZZe5qE6I9kVWmUeLkKsKUY3cb7vzZTKdWu4HizDRQFS/+5avsNXo2/qoR0eN3chSu6sIqnIZOLsLZ7BLpQrg7YtVO66mCE4qZFw4gNfwf5760pfe+uY3B6AAa5PJe3t7SxFsqktLt4+PV5eXeXl5eXn5+OhIamWgAO/MZvcB9eBg//j4/V/6pe1ajy88cPH1b3zszJn7z5yZLC/XcVxZXUXE4cIBpkw0M0bJixAUiirVZsLMrbtyZzUsfcxGLyJZp2o30lDDTnE2Y66RyOXLl9N4Nf82vIi2/RTNflKAxXNd+hSuNiK1PFg/L9l6901/CIAKKM6sTm/t7Ifz6Fx5YXP91s5ezjoZjk5JRQtbAIprt26r6gVrNOPKJq1tbK227wJojRZVFZrmFAHtKC8NcQ8OmhGrpKx64czazZ39MBF0bnl5C7vdOB0fCXBQ67lSuF0vACI6ErlndXVzbW1zYwNEBwcH2/P52jjOal26vScXN+X6lh3cHYssM2v0TAQw1nFsya7AhTP7t3YBjHYKBUy6blpmVDkws4SNreqVOxzAnqIE0piGozDjcBzPD0Ptvq6qdpNrFdG4wrMwF1UCpkTf+E9/+lO/458d6+h4MAI91nHd3TWTQdFa6zAM5NKbRtdXPNN1JK7wNXxI0bEsGaZ4C1QxKzUCM6CoXijlSq0CsAiXsru3tzGZHJ2cmAM/XVmZzWbMPKheHIYjEZnPh3G89C//y8s//dMP7l6/CtzY39/b27s1jrP5fDqdGpeO4zhw8TbRqVYU6G85UNU6+gWNAI1jbd7eImtLrYLwVYNxF2LNPRBN7vbkFcPB7nvY0Sx1QdzujM3qKA1F+D3jiCHleIjIpHz72ttHhwddfM1HvTrh23uHoSMIwDuHB+gcJMPFTHT+zOr2/lEOu82u8zLfv3FyfmNVgbHKvFaAhtISqdN+b+0dNeEP0x8hEwDNdaQIaWgncoHUfKeu395uW6C4tru7jIVXj1oJKMBjm5vX9vYmzASsWay+1vl8zkTjbKaqlrMxqA63didExyIEHIvs1LqS8Qjg6IPbq8zmqY7AwbVbq8ydBjU9CbOZ2eNWeiCdDjDcUCuwa1wen7cf7KxlFoFxix2L2W3VcRxXl5eXlpbms5lZpGVmIprf3Hn+V//7Z3/0xx0v57JrSG9spYiKzA0zU6x1RL/YYFTKcLrHVUXDOlGfMEfEpYjUf/6nfvLn/pP/hEVWSnnt+HgEmHkODCJEdDKOR7PZweOfe+TNl2YivLx8cnjIwArzca3Hqkz063/xLx6pDp/9LL72tbPDcHRwcGM2E5FyeGh7ykR2tYp2RnKBaQBkpkQel5l8DcNAburcBewbjuSdYrBopPUfig5YBmzefPPNdBf9vJzIM/+15+DTQ/LL1/OaFfuYiA8pRmJfv/KtX5e2WxoMDUDPrC4TYWvvkJAXTQS7k+8hAVt7hyE/p41xaGkAuLl7qNBz66t5xbbdL+s7Lv6cO179zFJnEXlEPa7q0cZ7Z9dXtvePHORFFxBD7TsPPFWILr73Wk/g+KFP6tWXzKNgIjAjS21EUGsdR/PIl0qZExWiMbKvJsxc61z1QOQI7ksAmJxbv761b2s1Vx2IDi1xOtQfA3NgJjIE+lVzp1UvXDpr0eazFzfV5VxGhao+eHETncJiou1buxugw1s7851DySUnKh1zDswr02lRD4tOmVmV9o5f/X/+uQfOPnDpmWds16wa4RQv+QYQxjouMXsHfBHD5MikAEBVb33726R67plnqGXvO7fAUsgjrMvEAv3DP/mTf+cv/AUBbta6TDQhmgBzgIFrDz09vPvKmW/8yu3l5QsbG+PJCavOjo6gusJ8exzfmc9H1Se/8IWvfu1rBOyP4wrRwXyuANcKz/fVIW5UAQKZuYJXVdV5HU38hmHwa+YtYBStrZDGyjyiPBwfWsu45HpikiqAX81uO2HZOaKKUSz/KAFM4sw2pO4IvnBpV79H4VSeJEutorp97e29m9fKsITePVSAkLkWqysrN3cPVPPqCCvHQ5+zvGDcEcuQIe3uDwezev7MGhJU+bLi9t7BdLoUk7ZvmdDp+Y2127sHsUoLj0vWQeoy6K39E0L06Yx52ZfPvvvqJP3qeJ3df/e9tHVEImJFgkykqvP5nI+PmWg+n3tgAijMbBegAisXN+X9m9dms817zpnNNCdKNldN1164dK6CmDBzhrArbXHfPedyHS7ccw4ATuk/U5eBRNpKk4MxBe554L5bO3tjrcfbB8mlmlIocnR4WFSLqtS6OgxzYDaOBJRSpvsn//Df+rfOP/vsTOT8s8/e+/nPa7RKT39EROo4Arjy3HP7L720zPzA5z+/xGyx9xORZeZjkfXJ5Pbf+Bt1HEVk/emnH/hjfwxEXMpMVYke+Pzn2WLAxcOrNot/4V//15no5//CX6hAIZqJTJiLCDNPGFV17/i4DMPhbCbjiCh1WlPdBnbHcQZ8+x/+QwE++cUvnrzwwm6tIRAOB2aq9Nprr5kMmOHyNYo7/sb5XFWJeDqdDsPgkLUFXf3Gce0Cqs1BDMTVgFfuH9HVK1cakExJCcURj4rrlJCbpnb2TUTDMCwtLfVQ0z4WMVtV1f1b1y5/9e+lMFDrB9tSsfxogejG1m4LvCwq6QgpU65VTqRJesecF89u+EQieHaju5QkYFda9ED/nSffPYz6IS0OC53A643feH6ytTfNw7dwMk9EDs5trG7tPbS0NCWaA/cOAxEtEU2Yl4ZhGIZCVEVOZrO5yCgyUz0xqytyInL4xIOrn3j00rlNaiPNhTF/NrBkG5KnA3UoRdsMyDYpEuDCZ6O40uPm7j5ZOWp85cX/6u9dHIbNUpaZl4lWSlllXi5lZRhWl5fZKlxrFY/BwBJCLBXeEbsqARUYI4FUuo55j/3ET+wDJ//1fz0B
Vj7zmVvf/CYAZr74+c/f+MY3bnziqQfffW9ZZJzPLU/7wX/1X9167rmTWvdfeGFUnYtcevbZg5dfrqr3PfuszbYCovrVr371uf39CdEZ5lXmKfPB2Xse3Ltl8bZpKVOiM8NQVI2bRfVY5PmjI/r0py9/61sEFKKPT6dDCkgM/tAK+lOlGUd5QTNURjtW4MlkwsyiAjnVHMOdeFu17IJhmCSDKGkjSuEI46GX62RCDzeg3eCu0ZHIe6QpFO0IbsGvNp7ouFpEbrz+QlnsyeI/GILssp9I9Z6z6ze29+IhcfQf39RmhhdNZAheRyK0VczzxtZuVM95okerFm/Lky4/3SGnlCC/n0RnSPyd+1ZWbh8fLw4NTLSytWeSxEtLxycn7CBKxcrcRMawbKRKRAMwqhaA7AciJrq1vXvp3KYNo89RzXbUZN1Jc2m0U4WUP+UCWljdbbbpXIdGjk3EG3xAAWxcOqdbexnwHRII1Do7PLSVptB9Jdy0hONtTcjP5aF6KLIyDJbJuP83/+bn/tbfujyZnB2KKPQ737G8ziVgmfnCMLn2wAObb745tbyXz3zm/f/iv+BxBNEGMwG1lOMXXvj4v/KvEMAcJXqAAn/4mWc+941vHL7wgmmQgYjGw6WVFY12XEciJDIhmquuMyvztJQvED31p/7U7Vr/wr/xb6wy3x7HtIwjQMCjX/jC7re+NRBzFsCpB4/CtTWg6zZHW5oHoHaJY42cpdD6vjORXJGmAJ2xIdXLVy6HS2nWT6HqbXItO62r+2lo1ILjEtcUgjSaRTd2DvgA4GDr+u6N95J+Z9TiYqIINmtEvM9adDpQaC9GpxynxKRNXLv363xsfMPtarmYBvxSMuoNV/fXeNAiufZWt3g+dVUc3dz6oJT719ZG1f3jY0oZVj0QOcMM4O2Dg6r6+GRiTzgVJZvZsURAoBKEj27cXnryQUCv3bypiktnN72ndFM6/UItrlMvRNrpuuTH/LyleAsEKv01egBAEyINTIFON0FVrPpf9cqDFy/uHW3sHCDS6SVNRb9SIcBnmIenn64vvmhv7j3//N63v338ne+oyPozz+y8/PK6CKleeOaZ6QsvXHj66Q8ee+zG88+r6toLLzw4mdz7x//4/vPPHzz/POI0+8Z//p+fnjYAYBVYjWPaWAsA2Ku1AufNOSVSD4BJBdaefvqFf/ffvfcnfuIn/6P/aFIKM1PhUfH0l79s3xXIlX/4jYEA4iIqeQJsEitSLQdjmAwWf1bRitpEUYGIBGLxthR03E9hQNOwW9eWbEPjbEDZlKiNAap+MG1sziQipbBGsRoQNxP0AhrXRl175Vv+OPs3fiFQdV5JWYnQmeLs6hTAzZ19H4NrMHtM4/YFi4sWojQFP9Y51KuxUCHjuLCfPb+3p3VKe+H3hc+52uCo8+peZuJvHx5uLC0RcGltbff4WJlvHx+veqqpbTziZB7pfpRhqKpTojzXySau9q9ITSD/wa3bF89tUGAF7ZFMmIEUE2oR3VjEO+d9x1TNLw2QRiAUYG6LrErMc9WpAbFhGIgG4PrJyZHq8cWzF0+qOasGKBYeHosWFllXiA4iiHv80kvrn/nM7KWXCDj89rfPLy+f+cLn9577NlQLs7z00kXVx778ZQWOXngBqtd/5mfclhAJ0VrcqI7FzJycaTItkYeBpNaHNzb+5uf+4Fjlj7748wDmteo4kuoX/8P/8Kv/9r89YX78858vhYulPIPyXm4Aj37pi0Ok2gJBwI7F7EhoGJitT7wqRb8Jg5cW6eUIIFGOL/W6KogEhoKdV0T16pUrRJE8ac3s4c9sxsddJnWdEJ0T7B2/cxAK0WTl5I8cFRFBpHumhEdFebkNAKJCw8CRz2eNOoIl85av2HfqBUxE9OLm2q3dQ2JqIF9VxhHWdMLi5N4zxNanz8kKgVzY9s5XRNPWaVERTK+dziKiIfz6w/mcgBsHBwLMx5GJzjJn86ClyKay18ra2s7+fq11WF+/sbV1YRgo5ASxB6KqtXXEBHBza9eay7Y3e12pee8pFApe7HbqV+n2sQZzi4qFrlRwYWPlxtaeTdGCdXVzdby10zKlicC8vLq6trS0vLRUmDdVH5rj/DDUS5eODw9FZBw923l+cmJsydnDNV4n3/lOGQbbmr+zs3G5yp9YXVVVOzPf//bzyysrl+97/Jn7N8daSbwN69rv/J1Xvva1M9PpjaOj9en03tXV+vGP83e/C4BLqeM4WVqyLHHbnDIM43y+vLpa53M7uAbw8gcffOaBB2qtu098XlWH1355nM+1VjDX+fwb/86/c/tb3/rgG9944o//cREle5q2vH3zYga4CHhrJYuh1VotmbEMBSAk88EaAYi9UzxBpUWqkHxtu0NKKOwGWDXaZmlcZsFE7utYJhJRCgFo4boNjdIz8369mqIDkmkebdV2Pnhn6/1IxyEiYlDh0mJO5gLAWtuO/RVeljyWRbP93UGOFNQzXURVb2zvEZdLm2du7uyaR3Fxcz0FT0XAZMWZzrPZzyE5391lk8o+IzoxNrqvNNB6ypRl1VGzh6oEPDCZbNW6U+uFYQCwbs3EYzpHh4dTolH1137gX3z47/yn+TQJ6V0m2m3YQ1OLeTOgIGYL2QYUERGboDUxUhldG1JsHUUHZ7jishYzWuXM6tKtnQPnQi4CsnNgiwzRZLK2tnbhwoX15eVSyvLS0trq6mQYxvn85OSEzp8fx/Hk5MTSsE+Ojiz9cDqdZmsbe93a2zsznVoCxpn77x1u3Vi3Xl/W0pmIiD67f13fuFnyPJtIRD5+//1E9Oh0SszjOOLGDT13TjqDrxkpIFLV5dXVo+NjBcpkYjMtwPbenqr+ob/6f54QbTOPtardRqR6+2tfY6ITIIMC1I4cTBIVwDAZIuFTAYCZ7Do/BnvNYOfeOUdq362rMZiti6hkzBYCobZeBLp85Uof77Hz2HRifR/JFAo1johjWku0LHHxQjesBfyvqlee+4357MTVuyup0n1Jmyx5w7y2QLd3dzt58TqRFiFOjKhqoQGo3tjePbexKiI3t/fm1qgtX0TWSKAZzLuB49iXpo7igyEz+d2GOBo+Pd7aZeqOlQAA9wwDiKwF+lK8f/90asDmWMSEmZmnwA/8/b+qGdUHJHIndkVGkVrHWBO3zFbbkC6GZc3aZikENRSrq0FH+NS9416UdP5JaEebqMJSN+Z1HJfOrO6pzlSnqpWoAFvHx5OdnZPj4/WVFYjs7++fWV8fhsFmV1VXVldrrVVk+emn5fXXbV+sGwl7TTVmpZxfXTUcfPbs9NyI9c1N6tstqNddSq1gtmaLw2RiMziezRI51XFsuFFbMtRY69IwnIzj8dGRVQUz80GtUN3e32ci1HoSHSpMb1rqhR2/T8rEEihUdTCD2r0Gt6guinb9oWrU6HE0ng/vTpX9IgmKTleAX7Cg8FuPBTaFUNnNVqjjvkCKjq1NLXNcFBiZWPYEG5jlEqs1r+NoDNoPTH3Yxuz7t66VYfDcElM4mesaphQozZwicAllwWHclGKz5WJmv9Yq1dPROC7
yEKk3tnZMCDWWNOZIIrU/a0rxpf5/1P4FPLdnEe0iJVkDf6bi0irnVldPjo8BLDOfiFhoxHZivRQ7RbywvHxuaWkcx1LK+jCYVhuYiWgC7+EKkdl8PlcdgRPVZaKTs+t2zI/oZ0AWxy7FMbYoEwAhR1uA9/HUiPGl0920X9Ps5MrTPQl45SNzu25x7fzG9S4B84NpqQ9fuvn2jbPb249ubt5SXZ5Ot7e3VXV5OiVmqxU3ApOtLY1Tklx/6+l17eBANzfns9lY640bWwfbuL29nSs+tzZgFiOIdiIuliGuVWRpGE7mcysHsC9K1Bba/qkpojymVb01judKOepaiPmf7JSLaFQ9Frn47LOTyUTnKlWsT9gwTMwDNZEZ7KFepKLqjRBEy1Jh9nab0beaKD8ZLiyaQbKxkhLaRTILrIerb74ZuVNt5+yH9O9HEY0u8wn4gchmU3jFhPFF+r9dvY+o7Fx7B5GPaokyA4F5iIJp1xiiqLVKHeGOtVuw85trAG5t76FdQ6NUJQOpSDOU3mpT2LixvXNuY42JrIvyrZ09NL9W26ztipHo35/ub0ryXYNYdidgQxLxYb61d+XwcDmasG6Gs0dxamIPe2B9/fjoaFL8Xq3CPAxDYbbsMevDNhDt7+yMwExVgL1ah8217OxJEZW0PMaclBIVWji6T6xj/3g7+Vg8crYK5du1yabgjUtn12/tHCDM2uZj98+uXqtEDAznzxy9dX157+gYeFNnm/snk6Mj053c9WM0npgMw+5stjYMaU5TFKWU67dumf17cfog3/7KB7dutVF6/rNvhXUgyAwtE4ca89XYvuQK6bZwHrcOADhWZdVZyG0NK2Z/natWkQPVw1r/2J/7cyAqXFSUFHWspQyBv0BZXmIavYbTP5lMrB6akIET27gozk77aP2fWSULCRf7lVhzaY6CpwYQu7lRd3+CFSGQ36zjk8oH+kPMVUZeLweirBpXVbz/3W9NV1cN9drVih3SR1pC9vlHVCDkAYCo3D9dubm9G+ax/ck5trnevm1ZhwSFYehLZzYBHNZggGaKmp3Hh7w0jJX5D/b5i2c3EEWqCMUhQL2xvf/qO6vDAJEloiFTTcLgGIQeVff29+9ZXQUwnU6tTfuklJXpdGV1dRzH2XxeRY7H0SqQLE3yRPXCQw801BR8Nl2Ji4JbJEJ996MzK0WKuzVYzm6rBpaiKrXD2qKW7ExxlnFYLRgBqN6a3DhWHUQ2mZ+8uf/yPWfHYZBzZ67X8RNb+8fjmCjdWTzxSa1Q3T9xl6qHNDyOeYvF1stfffzlXz+czdquJNiMnwEgSneCM1I7I/dGgjko3hRrYEQE4IP5/OIwnPjNwyTh2wMYVe393Vr/13/+/213ilIhFg/K1HFkA/BEqtGVUjyRRRI8+4CyPygB2c3JecNbWxkPD6UYVohkGEUXly7EV69eXcCCrWdvS5+EkQ6EkBJZpeYD4+hPVZWJKWbSPRnHu1uTpeV0KYNJPFLmWRQBdgPuavtX1S74vf/S+Zvb+034mwF25NsFdtqe50+39/YvnTtji9krkO6X7s6rBfxM1MekVVT10rkzBGDgBSKEApzc3OILZyYi9YPbCA4GvMIWIcDDdHpc62wc19fWhslkKGVpMlkahrW1tdXV1XE+Pzw6GkW2bt+e1ZqcNFzYLCXviXf8BWDn6KTbT9xzbhPAjdvbgRLRy+bC/90JLYgIuHT+bNuXsOv3nj97Y2uHmC+dPzcsTd8rw/YrV66fnJxl/kzdfPfTT1ap525s7YlMyCOgFNH4tFQ6jlk4kSNyIB2Vkgr82X/+879+/RtH17soYwzVyjZ6VJm2TbsHanzS5pgFG5GND1G1Kutjs0uAubs1bsOZqR6L/Mif/tP/zE/+aT8iBezc1UjVWi1P2xhnkDgeiApztTCAeI+J1uBCF/bBq5c07gQM78JNTZ+na/cMS5ekk7uYe9xZNx9KbrO6N+pv1MijDrfKLnYKvAbdv/VB/ikWm3uRiSV39UFUkrfMIHS+gdxzfvPG1o61qfMJRn+JeGYeNcUzugnd3NqLOWj+ndzAUgTugvXa6jWB9hh/dDPFgqj7D9Pj8dJk9dtvvrkSl4bEyCiHcnEyeWB5eZzPywP3Tg5OlldWrJPFdGlpZWVlfX39+OjoeDY7Oj7e3t+vwExkBM4Pw+GnHvdLnqVKAH7qLI/9cmNrB8T3XroA4PrtHdipvlgxEhMxotUfhSE3BQoY+sIHN2+le0KhL4hAzFBcv70F6P1PP7F74/berZ0XDw9vX33nyZ2Dm3t7s3EUYBJxKYS0FGDpvot66dzshdfm0Wag54KZ6pRoJqLAvZ///L7IzvPPL9stArGTybcpqPZrE+ZOIempf6NectaZ6JkqAwe1VtURSLBzIPL7fuqn5iJ/4Kd+yiahXQEjE4P9fHc2mw3DQKVwXC8aPrdoFBFRColzrcZ9EKF5FjKbXFu1axzQgVZVvWJnv5Ss3h3uG/aSsOUdwLZRxGWtfv67MLZsOi01x/buS1+fz+eLCnQRNnVoOP7kiLpb/+bAbK6vQfXm9m57TqQJhrGkYGMXyCh/TBMaOqO94//XnajE0MKhQv8t1SNrS8iMPq0LStDd7Z353lHan35myWdnlpYOjo+nw7C0tTdZW2PrhGD55MwnJyf7h4d7Bwe3d3YOTk4Ox/FYdSZyY3NtY3355OgolsbH33BYmzIAffNgH9CLZ9dvbe2BCNR6XEQYPyP/CzcztMX0ZmnVV6lL0LH/e/yHPvvqP3ju5Pbu8fHxjevXV5gFWCJaUgVQwuhZvGPt2k3U8dp8vpSAMVdGdaaaLQp+5E/8if/4J3/yyen0JLqLJzZra91JqwIV4PhXFz9mU7avV2sSpArAJHYaN8gdq56o/uGf/MkZ8Id/6qe6GB8aNAtDw1xEldROebkQgXmAnf3WqojWJHH2G/JmdwGLpYDYMpdOOPuXiWVLk7JhEJiSSWmB++Dbiv5AufV1savkvIFIKcWVbI5NvQYjlAsB+ODN10J3O2oO0NDxkQjFsNqm9oab+gVUCFYHvbV7mEJKiyrB5wGc31i56QX6MYhTMo8FPN1gc/zdaFpInCMopapjjWxY1Sw7sXeO57OdgwOrQ9LEJiHNaq2qLLjAPEwmIJoMAxFJrUfHx1Lr9snJ7v7+7v7+rf39o3GciZyIHIksP3xpPjt2qo2rgMhIb7nL3YK8f/124jJacIgS6YRtcHjlQMRmajl7xjOu/VRzTxV4+ItP7d/evfnGe9e39paZDTwXi6iH72GSuQXM3vngWLoDhVBqnr0PAHj0C184UV1ivt61GTd2mmsLhiVXw5pdhUYwPiegqjJRjc8r8PAXvvDEF7+ofiwnn/jSl6584xtMPKr+nh/5kdWnn24cFP6YxjmMMYDU6OtqrUFIPaMDqtBBxLrTVCLrtk52JJARhiw9KlHmahGp3FBR9azphmEgIrB7nyxPyy8MDeuaDKya1lsd+5FdPuZ7aSnZDsihnoPZGEPDN7ax7X7wDqIFMTq9mBabbKClnU9QrpvEKDj4kfxLGA
jge86fgerNnX140DToq2ag7/buQelv/TmNndsP5zdWb+0eZHQ9jbld/xfarvESpUnQnJE1saRycfPcSId7e+iyIJPcQLRSilX63DqzcpELAfNxrONoVz3u7e0dHR8fHB7un5wczufHIocihyJ1c21ydj3DsGlsieDHBAJFnwOzMFNXIp3Hm9vKdoGUvYrdSOTrT8Es5tMF0YbL7OeVzbWHvvBxAAdbe/tbe4Vo+/YeAauTIXkLoLOXNjcvnDkbavqeR5787I/8wQTCxoom9n/7z//59c997rNf+pL0Kb1WA6/RmDueayN5+stfNihSmq5RiXv5bCGe/tKXgMalxPT0D/ygzcEeZZfRUIvjaABPhahV8iJOYUwMpIqQGAcMolIjD4mIVAAViVZ0gRO7yE26Zt1WMbXrmCQ82GgWTVeuXE7s1Scnkx/O+ja7d9SVKKpdMixq/EJ24XIsFrxgyH1QG9vbL34NpUDEJaqz9hJrD9TOFBDZtVaRi5b8pipSa/gLTSbPr6/d2t3378a3gLT0vg0mn/m7Off2gyiIcXN3PxRRHveGAhT1a5n6J7aBZUBCCKzAcPHM1s0dElmKuEP/mjDPVUutynzv4Xw2ZSY6OjqCah3HcT4HcHR8fDyOR7WeWP2gyKHIxuP3O+wM7rTu5eQ9amwBSht3DE9E7Nw4YtAN3DSwBsqzA38R0lwToG1hiBYJ9LG09fNn1s+fIeCex/0hF8+sI1wtDtmw/KJPffnLn/rSl0q0W6vRmI2Y/vc//dOWpKXQcRzt7j4rg3c5D+9S0f6TXMTcXDYRGWtlzjQByVkPNJyCbrm29pt3g1MlZruBpLm0SmYU53UOxTgfCTSM89GEzRpEOzarVVULmCIJ2UJZqdcT7EXeld81jk7ZeCJofJhNeZcFFzelV/P8MywfAKki0XfJbL5DC221ixpOgjHujbffCOvYFjcdqeaxpYFToI6SUC2d15Y357igKR7VjZXJ7d39Xq402I6Izm+sEuHare1gy45D7SvQhWvFFyBbPHBhk5WA2Ykdy/tBa4AVAkHm80F1Ejk2yAAAUYk2kbYd+7OZzuf7l85uHo3z+dzqvcdxnInMRQ5rPRLZEzkUGR6+xGtL43yW8QyKcXVCiEUBbBPyNCq/IqZ3T1TtPoBK8OO2O8JA8U8Kevo1TaVlDCL+L//4wa2t85tr1Ixmc3e+9Uv/v8/+8B9oAya/bdNQnbWYNW4fdUQciJL1hxAhS+chBiHxAoIJe2+dKYp/OlqIGLKJkvfECctXI6SX5s0eJyLWVp0L272KzDyOo92aMKjfxFko6ifNGKqqVKFCWhWwo1Rwt5jmOffJnzlKivAdA5cvX9au/aJ6OriGl9mY1W4b7pXtgi5If0Mt0SlzsTT5eOfaO24tm4r2o1QPkiUbWOtav7ON3PFwiiaK1qEDRFDiMOWWaQQCLmysKnB77xDQc+urt3YP1Hpr7B1aJRPCpqZ4duAL1jGn41mE7bnjZWNSlaoNPcOnZ1jt4Mb28a3dFeZpXKvrT3RZ8kUjYJkIqu8U2d/aOjMMtoMCnNR6JGIV/CbD9zx8yc4lmq5b9AXi3855a7Npiu3cxsrtvUO/1nFB0JoFO8U/6cIkCNRu3pEgT90QnKQBJWsw2DHlAoR87pd/7tnf/Yc6yOqsDs772MhiQaGhXEqRtpTEYH86cz6X0E92Er6QGZ7mCgh31btBhU9CVL2xkXaI2v6tYzUpKKVUqQYK3KRzOmy9rwJzcIC5KNTCX1AkXlS7AkNVMzHFLwSOJSVS6xhoDo/nXaLWaqc+Zge9UDbLtot76skJHzo8EWG2ewu4sIKhuPrcbxSwRi6QFwY4Bs5O8cjBAMjuIsEJtmxCyRcEUPi46TiF1jq3vnp773Br/zAvaLhwZs0uUiKiC5tr2Qers1cdpvb9c4uyEOfyXU/epDBr+cf2yaVzG6Dr1sVKF/9kdjjFeKy1Ave/cxtEs3FUQETmqjPVI9VjkT2RA5FHf+jpo5YcllR9ZwykELJjsO9fQywpisD2wfHFzXWo3to7RJuCS/L5jTVkNSLR7b3D8xurxpr2M4Ct/aOLZ9e3dg9zXUJlNPNhbHdhcxVA3Ilia+xyk9H+62++kbKhsR+qCnj1u7HrZDKxds3Z/inth11imMicUQxJJsgw5rYFsaxEzU73YbGtexETaRcx8ZTbIJTDy1ACMxfPRIT1GxgkVCxgt2b5nMMsuCVTlcLZ5ssLCYk5rxEGkYWJXQxUBXjzrbeKJ3j4rWiqynG0sPhrxu59uB4bT7CNvNQrXSALLxnQ0r3r7x9s3xqmq/CAF3rwdfqVqkgWxMl+IohGoUWIgrXnj7YFmquCjTPniOnG9h6gG2fWobKxee7W7j6IDqtMV1aTxbrD6rYlKaP9ODq7k0qe1s+sh03300UKXHT47tZerWeHwd9vU3Hjw9HAzE41JQZg5teSB05ETIYvfvrxi48+iIAidpTd64UFXRpc9mGZZRc2162D7/LqeuyJyTwAPZR8nAKYrqwdjL4oK2sbRwJVTFfW92cYllcAXDizZoJxa+fg4tl1xPW5ngbn0BGpdrrx+KHy1o1r1KLHSkSFWey2XRETOXRtHg2kDNbjjnmB/Yy/wrjZVQRmn0rhluYUW+yy4PBORIRL8Rw1VVVwl1DYWJXJvEaJS8sQhzLjOA4gsluVg9etgVA7vKa4JqH3RCTa6MQ7sIeYLEuGsqCirZk7XNuFj+qZumkuZdQ8nEBEFFBKKcPAjdGpP1t3VwfEpUyXV5IVjOmtZqohPLPHnlkZpO0xGTjsxQkuEAHXfO9o4bOA4oH7lhtrEz2wtmbXOyy+KLbPrEdSb0/rwCmQ94srAFg7b/tgV9gEAJNhslIK7ICgA1Cl018UqraFO1UrcCIyA45U90UADPdeePQHnqFEhIQHl9ctJTwJ2hhtgpfOntF0FBZFRmPNH1xZzbXKy6S6WZ8GDYmQrSKiQUmxhQOgH1teg4qJrpfDuooM++pVx+5E9Nv18ld/+RNf/uEWvHCJV+vOmd0O2qZZabe/uXA5sEb71Iy82hGCQivVvGXU8XPwowBgkJJfGKKuCW2ciStcRtTxdq21qtKiJzuoSB3n4k3tFsxRkxkLh1gdaYfOzYSa8IuIRSSyDOOtt96KEJU7EtSVDZnIhnWhtmtGQtTu3TF2HSNHPJOuxM8bXBeq6vvf/Va0v/IF9eIEb8sAUEbSkqIHOQHCYG0QnVOZKF2xjGDhw14pNp2uuOfcWRDd3NpJ1g1+6q5RSm/Bweip8ychD1bh4rkzCeI6dG9OH+1fu5Usan+q8VGF56+r6qzWEvunQAWqyKg6Uz0UWSIionOf/2RzhWI+pHHrjC8uA3TvhfMxiMBNgXB9UZCLEV9cBP8LtrxRU7tdB4BBh7ZcTAAKgr2b/eg1qulm0nLnfvnu3Hr36uyZH3QerNIaUZjOcFIsIlaBWIksGsxMNY5zkCfJHRNyXGiOpkhUREBUmOs45p8yNG0PkPCgXEy0eYGNTWJTapyHishgRQuZvRzqJOXTOxDZoJJk+gZxe
UDn20S7ZttXrwaKFL/cQm0a3Q6QUpcSQNaShpgtnIDmEvh6lRiDqbijrRu719+NfYqoRW1emQMsl8Zgqj746VdZZUwb2VBJM9C/WLDeJtMH2BaYBefPrNzY2r20eebG9k7vALu5oMRhvl3N6bXF9ykjcbsC8DtTScnSHZTSM45H9FkKyV4EMHMNSqPI3O49EDmo9UDkwjNP3X/veZV5W4MQTwcvJiEq3lbMOWTROOf0zZKkUjYw53dERuwrQ4fhEqXhJS9USuMfoCl3KLSf+QbdqiPWtm1XZ+X05jtXt95/+/z9D4tKZjQYARCxN3gnC/PWWk1KSyki0u05lQgmx4qSgLkPfcXcEggmf3DAVVslUrdJER0iux1TqKX0EaCR2hzGlIZhMjEszpktFZpTXFPbMkUsTv1MDCkcwDiOVHjgEther169OiwtaacXrO4gsUcwcfsxXlAFE1ln6VIKl2JXt7QR+myaEn776y9IZnEG+8QqKkDmtycSjIhLO4LKbUGuunk7eQ+w5wDGAPosuztOBimtAtG59ZVrtzLLt7NOd4eRnZbpPjx6l7zQZL6juVnJgqTAPBpKaiR1mD8mwNwij5FIeKJ6KHIkcqw6uXj2Y08/Op8tlCi0NQnOdYUNiGMKL3ptICqGpBliatMy0MT5aTQBtNP+CDnFynppMSwRMndHyK10IFKH66K5a6l1HIeHowmoYOva2/c98mROcIEnvWUiiGgymVjveLclgNTaHWxT8OGC7m6PtC9a+6eElrUOQ2Gy/jzRuwatGaM9Xxe9qw5lmDA5Hw5cmNguifVkDJj/CUCVmblwPsjKa71s0JRBIRVMmG0A8/lofJ/EHVupIksA3D437NGvoSpqrfNaCeBS2O68GYpldVNWLAZ/BAqgDDMG63Ym3w9cIrzR0pk0DyWQ5N0xDTxihiG3J5mgfT7YJ9t/GeRuHahxYXPj1vZeJ+Dp4ixufMIfNAr2tBu3ty+e3UD4JLoQYXM5sWmM3eWA6KyiRChrDkC1qp5Y7Ep1DkyILj79iIwjldNQRUMwjY/EFzYRokuVdAPq9GEA9nAGa+4GUpDbiY5gQZricQBIqNtQQCHRebqTeUosJL1aXAzwg1hf+covfeIHfrTVMwZwbYDINoj8yjUGwFyGgQtrZAdmcbtE0rRdvptcUSMSZFSIiQYqUpzDVMZam78SNklqU8d5ctJPrj+Csg5mpFzI7ZJn3kD9pl8QIdiGyO7sFCW1XkVEpKRS7eTL2ffq1atEFDeuhDxl/We/3MnjwYuqOla/iinVGNo14/2ekgLEtH/r2t6N9xRop0NpHrltD8Cl0WnhkXwnLLpvanAGL0Rtq4g30Ob8FCynMW45dwGLIJzp8vMbK7d2DpBblV54zqoH78lAvpEAcGNr1y4WV0m758Pfv7G9zqyRi9ujSYX1AA5DBgzMZnvt7t+ZyIHIuU8/dv6BeykdHQDZCDu1QaheJPv4kYSJsUbApn0o5+RFG660PBHTPT/H2akqcodTkBpKDwsf8AZordnvlHwg9ohclyTCUr393tVLDz3uPNh2dIEbuTCNEcAXQa3eeT5appk1ZOKouEEh0tCzWfoPkcKFQIUL2AJm1SLJ6aUSEVjQZSPYblBOuY0wVwCDWdK2aQDF6aunoQUnObq1jSRmYj/FFdglcVJrEPG4SZNdgCPOnrzlWgUgosQS1rPP0Rq3C5otQboNkjwnmomufvur8/moC/OKD2YmTIu8AtDwFFIp9GbYrTUCGhP5fUIJlUgBbXEi9XXmbgQd/gmLfmZ1eefgcD7WHEdDp6lkYygLJyDxev/m9ub66qRweyxwcGN7aHgjuCp/ziWDg/JZrTWK2k5Uj1SPRD75iYdnJ7MojMyPh7ZtgpuvUwttYXWJPyRZTU8gfALfvtOPu4v4LbzTDqwWpTyGG0gOaIpCm5pc0JoACO9ffeOeh54oXNqXYxzGkKb1olWriCV42CMS5QKAMhEKq5fvK4H6ojBSZBPUOlb0AU+KjB7nrsV4HBCYlRe5Idw40YHSmbaxqFpRYXJABrUTpZpgi4pdQafWL9JL/+nKlSt2qpa+mvuuoEglbbbFXr5YZiVqq90fMqDXdshHpXk+Ciyfu/TOlXadlyb330UE8iO/Fa9/pMfsHhz949Dc2du3rjr52nvvRunZf7GHDsLwmmRX1VF1Ztd22s0pqmeefGBr/64Xr/1mX79FC/v9vu5ichf+cFqjtzcJmM/jmCOiTQbiEtJrRFUKc6211lqGMuGJfUtD6ExhSOYsKUBxS1GEsnrOt9RGW6pUmEwkYXgVvS/mqJK7PkGUf4elQRgEIqiq3R4qFmHzVhhwOM2ZyWCCLWErwERi35fQXBHB4Oj8hgZeVHMZ4yzKQVi49URkzrlPtRsk1PKu43o44Oy9D3QNlZBs1PzTu77+R2a2JKsfSfhDebJ7wuLQ51t7w4d+0g2Wrbm1bhkjfDWqzlUn59bPP/EAPnpUHz2c/8lePWn6kD9oD65AsNoXBe5/7ElLUiilaDjpUa6qvYtCTDq6dbHMZPdwAhWQd60UDd9PvTpAoeDCkWXiD1evK4IFlD3w67EMzSGak2aWsgxFQ5rt8j0LSA3Z79skiQ0uWj5aF0KEeyr+jjhgUkLUSROJyNUrV2KVIvmDSKHm9FeJEywAmX+iEZIRbdlazL1f0XZDzEn1WJb99fz9DxGog3+JS30H7wpHvx9R+Sfyupur5n/53t8FgNu7B70RvjTHziIEpW4V/L5M8ydUx+6/M9Vqyr6vf/wf7/WRkv+PoxbuangBdKkNH3v04w898claaztADX/BglIqGbdWKOxIaT4fmedLtGTym5EWuw6oRMZ+Dan2p5mscBS9KiIE515qYa5xNWmeBBnALKVY9rFpkH6QCoXoYAV6CUdVVaymOcvAbEn8SC67+/pfTOTsc3ZlpYFbu7XJgthW3iFdpJQSD8iifEaOKPrOeB5ZaKEIc1HsPNPqbJ589ofeeO4rUYJ3ejtV6R+NH+jUD//YMn9zZ587abm4uR7V/x9G+e5/ZC72xXFnn1mWF7+jET7tzX2N1hAzYAzzK6qz++7fPa7nVpf+8WZ219cdZv3734YPM6i/iVfvTZ6WaWIeJoMd7VpOnMeyveSO0281xktRn81m6NVlyI4Hn1XNB8wKMArrV6OpQAhbxIYjzSNPGDQaW7BXMLAVKEvXTMpoK8mQ1eeZGmILRiFcRMTDYD3RRSTbanqjubimSBVXr76ZsUaKzjze2VW97IG6BAzHEi3u4I1BsvWkWXWptZRS4sB9UeShdr+e4o4WLe1jRIu//1a+frNmxDfeRPfmzgHuLsbf47E3t/cA7B7N9/fnn9s4t7d/jLC6fTQLPX62xi5mhwGLNMw3NuTife/N6u7J0cPnVr7POX/fr7v6nx/60btpXwDI1jB3/cpHPvVDaX/5n/2xlneomu1KJLoRn2LOrAQ27GnlccmcQyn24doVb4mqhjWySiIzRFaNZAkko7U09vAQK5TUnVENMO8HKW7xlKyuVrxN3JCeMdBymC3wDUCBYRhQ
K8ILT91DTFr99CAbzYFI4gxZqwIVbqhbYko6ZuSxLQ8F1Cp+mXgZMh+NmUthgBqatoVLdaOqwEOf+eLZ+x5g5uvvvvn2C18jLsSDqpUT+yEQiObz+sa7H+wfnoDw+Mc/8ft//J9v4VAL6yk4wvYKvP6t/+H2tXcAXLu1/c71LUDPrK08fP89b127uXdw+Hv+4I8/9amn88MZ4YzlVNEWzLB352Nl5ul0yWINtp5LQ5nXqnGFoUEeW0zbKmTYU7yvvqj+xq//xqvfefX86pn9Dz5ovN5ifjapuBo3Ts4rUK3JOKDAbByP9vdXVlbGlXP13KUf//EfT5tdSuHCFBdtx/7ivXff048ep/tv0FjP++//WGz94lmRQlW/88JzP/+3/6vzZ8+sLk+h+tb7HxDx0489sLq0BODsxx555kd+v31U8vRf2rluKeX1V17+b3/2rwG6NJSLZ9ZXpxNiKxKwGCoTWCHz+fjFf+YPPfTEUwDueejx+XwOqNcShGsawFcbcwJQtcoBGcXqDeyvhdkqdSVNS2dGrVbPjy2AImJFu/55u2q0DFXF6vZsJbO2r9aqkl0xvBLQBNNPWFUBDA5Zu/Y0MJzMVlXrrmbLcyKyjOpOmAmAeb+iGXZOsfBw1Sk4midycPNcRfw2jtYMIBu/IhxyQpxNxKOCs87e+wARnb33gZe/9qvrK1Om+LhaOzxUkZP5zENjRLWO5+57sBupy14HUdoTdvYPb+/uq8o95zbnYx1rvb13cHtn/8L9Dy+OE22c3qy3a0LSXIGmNzy7zHcJp3x4Va1VKLK+c5y/8Au/8MobV8+ePbt2+fLR1tZyHND2Vwpm1p+4OtUxjLDGf4+Wl/f29kRkbW3t5devnvv6c3/kj/yREE2HhhRxShvrceW7jtPSDDuF2NbzRAnAY48/ng4nYqFU9fb27v7RyeaG1Kr7hwe7B0fM/Na1W5/7+MNEtLQ0uf/hx03fa3Qs1raeIKK//lf/yt7BkaqeWZ0Wi/DYbYq2lSJK3jn4h37/H7Z+NzF6chEXQSbYM2d6Z44TsAJ01FrtXothGDy5ENHWQl14CEQC1naNiSl2qZW6jnmBpAFPpaccA1kTSkWcLBEE5itr5skyqwi7SYz/ITJmLQk5HTZtkiZ+SS+sM563NrIvE9FQCmeyWSY3d1Khsb8hwqpAXPtGpvjvlIq0torokestrLlktztVVf3cD/6IiMg4lzoGkPCU8Ekp08lgD3zr8uuFF8fpOrdJ7+7ND7gMY9WbO/u232fWllVkY3XFTrxPjxOpyxc4gKKSs3S+jrtMmX0a3eE7xQFYMVbpkveIfvEXf/GrX/nKmTNnjo+PZWfnY+vruYwafluStkHUTmgl611U95iPjo5OTk5qrZPJ5Gd/9mdtnEMZmBgRgfFxVi/wJMTku3Eyc2ndX3xRuVvMK5cvBye0rQfw3/83PwtgZTpV6PLSkn+EWYmHyWT7g3fSotkZpvtoRMX6HBK/9p0XzcpMJ4UAlapqh0AKBK4huvTwk6rKTFwsSGqZCYWJuGQTCEfOOVREYkwpZTIZADV/ODk5Fz3COmh+bmiEEmvTGFkMgItUgSpHrS4FhiIfZ7G2+NYqjUvhiN6qd8Bua6kq6kmUrkkpuCriSGGEDdmSt8EjJnrs8ceJuPjSUGCriAy0w8kGhZscx3tkp0cOydSzKEyqmNOhC4Ci/vRmCoiIDo7nAIlYfliN/FgwMJkM06WJMS+B3r7yem8JKR5lQ7117V0ehjKZ0GRisPrM6nKeNyxNJm+8+qoGrstVosaevq5Zxmkz0S6Yh86VsMXM/3r5hyf+MEWE8+d//r//tV/91TNnzph6PcP8wcGBzb05hCHMJqvehDFlOAZARJ/+zGdms5mILC8vl1ImcWdXvtp0Ynl9m8xK9NLqEtxqKDyrJ1J3FLhy5cqVq1fRTdkYYGU6JeZL585c2FzfWF0GsH90PIoyD8vT5ed+/e9FKMhHVf0KLxDote+8ZIZUVZeXBgBjrVaLDwsOGzty+Z2/75/ziy252K0URGx5dUxeKB/6V1IVZvM95sJcrAfYOI5jHa3zBoDi4Sbu4/m2Dq2mvY+EdkuarheTVxGz3bNKxcfKJdMQTdfYlVFq7QRSnk1Y7B7hxMw+Wdtb6/BVyjAUb9LDlKVYTPTEE0+0XQW4sB8d++Y32QWgEVlDmCC4u0KWO0JxEMUUF+sGI5jTb50lxfPgycfJ/Ozv+mePTuZYuK5Fq1XwqK4sTabDYMN48/LrtjclOk6ndVCVqy9+3dZ9b//w4OQEQR/AdGmyNCmXX381PQsXv9DsBjstwjcMgwUeLI5pI+dSyjCUYXCcw37ubXrXdQH7IkdEEJffeONXf+VXz507N51OJ5PJpUuXNuZzCqGlEE4NB8klOU41e2BgQOAn/thPPPvss2tra5PJZH19fX19/YUXX5To723sY7vvW29enI3Tkwob3PIdy9tUTPWkrAYYvXLl6uXLl8Mp1tdfeVmhm+trxg8bK1MA+4fHpFCtIuN0MpQhSpsVTDyZTIa4zOmVl563P0wKS1XLvnDIwsx2iEaYj/XRpz7VNI5rRb+xknPY7Ld9OJdK+NuqiNRAk4461rzJxC0Zcyk8xO2tAAxpwy5P8dv/2C1ySpPV27mRYg78ygkC7PPu+QUvmJtJNCQIsrhWVvaz9fha9HUcYUbXWGTGGTm0eezRRwHPNLhy+UoVbwVmqqYUNnsgcXMSMz/6+KPjfJzPZqowLUFEYPLaPWrkjW5TLsxvvvkmos3to489duXKFfvo/Y88vnf93eqtX1VEmCkvCZ1OhpP5HKA3X38Vv9/bI3mGuuMLAoaQQdk/Or7n7KaoXDqzemFjhYdhMpkencymk2F2uP+Jz34O0rfcuftQT61qvpKlNM4JQLDEcltVkwDrjP8Lv/gLm5ubS0tLFCUcAzCqTjrz65qy27VE1wjHuBBZzcNf+7/935eefvqxxx4TkWEYVldXX37ppWeeeQYAMxVeGKqN8+NPPnn58uVsVuxxJSKC2v0rjz/22OUrl4G2ArGqpfeFrl69+ugjj7768otEtDJdWpoMImPni+D9m1tPPHQfgOvvXK1Sze5rBlljVQ0/A9hcW1lfnZqFCa+dRNWuj7/44KNN18dQCd5u1IZqDdOJaBgGh8NWpxDnslyYCLPZPEAkwRv0LTIAW4XA6aHa7qRY2edDquzCao/11lhVu6IuD4etDkRV7SZEJh4QNPw8qTMmIG+LJF7gotZdwzUhYJA71JJU8ZC3WfzHn3j8jdffQF5OQdHJT1UVpvIAunr56oMPPgC0HnOqiqjZMuRBxEC07VPth0qBxCyKBlUQrV247+D2tToTrWK5YCr20LK+Op3uD3oIhb75xmv2mMcef+zyG2+A8h4NAvT621eHyeRkNj+ezW9s7144ux5rjhvHOpy9b2Pl/K/9D1/9tf/hqwi81P+bTgHCFyKik9mM4teUCmcF4E/9a3/K4KadNBQmVYx1ZOJHHnnkpZdeeuftd+69994MT9z37rscvgm3Ohzv7eS/hkAkg5lfygC
pfuz999fuuefyAw+YBrlw4UIZhtD0pWcAhAck1exzOIDRXgIKS9R54403Hn/8cUuqJYW1ADHnk6L7p2mYt95+6/r16wCdWVu13jUQbKytfLC9T0Tb+wd1rKJ65ZUXfw8xPHsBmgwAEPDqyy+axkxDKlW4FPeVa8UwlFJ+x+/7wxZRtL4U1gojoYOd0yhYqggsd4riQAkAymAFtKRFC1cxEbLvVkTwxHU04m5YO3Nt7hJFKR55uma1HuyUSSGhDKEqYtWBtQols8dQAa5VxK5WaY3I3fyY2Li3JhYKZ9LElwTkrhi1SOdAYn2Fqj786CNXr1yl2OnAdxQZWu4ivvHG5UceeXgYJuzNPZQj1pFREDv24q6QCkRPPPHE1StXklmNBgGf+x2/++de+vqEiw6Bf5gAVqlUyub66vXtPfvGm2+89ugTT0H1kUcfu3z5cgYEbrz75u72LYDmY33/+k0Q3dze++KTD5ojeuuEiIbJ8mA2/5Q09v/e+evx8fFd/7q1tcV/+l+DQlSj4yY8tkkA8F/+l/+lXWI0mUxOTk6e2t5+/ODgVaLpxYt665bkTZMZBbHnRsNtBJox1rRMm0cvXvz4ycnKzZsvXbxoxmF1ZcUjJSYBXchNODq3uTrOGKnHVNPqK0CKxx51UxwsGTwQpWYEevON14iwPF2qtdY6rzKuLVtKCZ2cHL/x5pXlyQSqb7/x6kNPfELDgoVE4LsmvVAQzm6sLy8v+zQtpil+AjufV8QtNur9NEhEvPWkhpkA5VBrV8qe+Rhu6oZB5nP/GrFGEbJ6cUyYgMWhdsLlHha5dLUeT5lGAQVzdEH3Do4GFeDrHM/hMgxlKBnyiuwrz9WqtcYd2ZptTjwwI96B3T3YhbswNBKh+fHHH3dQZ/dleEjG7ZKIVKkKfeutt8wxCE3TGt/ZmZiIcCnDMFgk19xdJnr88cd9TI2sKjA7mRHTwIO5m+7yDAMUQ+HNNU9auPraqwq9fOXK1atXKPSFqt54+3JhLkx7RydSK1TPb6xV1VrrYXf75Ee8TkWD8rWysrKystJLb76+8fVvGEQhkIqM4yhx2SoRvfzyy2tra3ZC/qlbt9Z3doRohZlv375ljLL4tAxlaUSwCBg6kS7A7jgW5i/t73/29m0Tqtdffz15IJ7SlaZotEDsopHJEj6G4DlVfeyxxx5/4gnyGKE7wR7ZABHRm5dfBzAZBqhqVRGROq4vL6nKB7d2BqJpKdPJcOW7L2agpAQbEPNrL78IQBXTpUnzRizeqSCgcKFS7nv0iYef/KTZG7V6VRGw5zVk5mQ69mk2PNpCjn5dc5l2DRDNpSCKcNWDMmJX3pNdv1yKPzmkzhfNYKzbeUnlF5wMsiq9xeu/88DCVmCwZeUujclEzUECQtTJzhTyHC+qmS0UaNoE0FrJuu+FP6aqjz/22OUrV8w5t9gyRQia4BqxlOGtt9564vEnhJmhXiMleuXKFVuqKNtfuEkUsczqxRVIzvvkl3/XWy98jUshsoKNYpeXE8EbPoMU+uYbrwI/DndG3JIw08133zQ6x7P50WwsQwEsK8Nzxb/n65RwnnpzZWXl6Oh0WZKVvPSBXHNUBPzSiy8sLy9Pp9PZbLaxt/eDh4d/bXn53hs3vnDPPdjaGg4OLOBc46pFii2ncHo5hG/t4YevzOdn3nuvqj5/7tzDy8uf+TN/5hP/3r/3wvnz5O5faxKs5szbNQkAPENPw0l3lKRNmhUgkaqiZSgMBvSJJ54AcPny5QxJpnNITMtLk+lkMD/JWHx9ZXpwMlfVsYpAoHTzrTf85KZbVSb67ssv2M9n11fXlqe++QFb7JhTVY9PZuaV2EWJVUQJJBAWhV1Gr0MZKA7HFFqY1WtgWxJRZlBIqVJVRVE8bKSZqhQHb/6zqZR2c16wLyGCtbashrpbcysFZfojCFCpIDIUSVZZhII8dQzZcC9ThUFMbL+LiFQ3CD4Xy3uUapwiKhBLHFOFjuMotdpqm7Z98oknyI52NEM7sRfaRnD1zasWmWWgMC9NJpPJxPNXTB0sjhahsIhytI4ITubzWqulqZgblwHdM2ur58/Y5dR48/JrFk7M0Vos9fb7b1kAfjYfTVc+dt8F29+t+mHFP3d53VWM7/ra2tr65re+abs+jqMfd8fB+1//63/ddDmAT2xv75Xyz43jfaqTUn7gwQcfmE5XmQmYwLMXHC0DMOkFiOjs2hoBh2+9dfH99weAiR59772///bbN/+D/2BtZeXZ7W2K2Fgwm1s8W1u/Q3qscV2zmWRPgO1Ga3ALtdb5fC7inPD4E4+HCkbuFBMtT5f8DNX0KheE+/bB1v50aWl5aXLtzden0+kwDBRdjU2dfPelF3IBp0PhmHJEMhQqJycnP/h7f9xNXa1xR5+HgrT6PTtjHXO0TBFNt0wPhdohgggRJpNhMiwR0TiOFoguzJPJxNZ5MgzTpaWlpSVThbpwZppS5qZiKAMyrF1rHWuXZBwHzgRYaYCPFqo61nGcj6LVQbtlk9Rax7GOY5VAXxQuDyJglsdZMETkwyPxXG0y1GHhcWY2Z6OKPPLIIxJndxw9DfocBvM0rly+bExTuFy5egVQLjwMZRiGACEwBvA512oK99RoP/c7fvRoNhcRGUettUodZfRE1jIsT6eBgOity6+zX31YYHHL8ahMpmUyHcHbByfh7Z+6RPIfXVY/9Aloa518Y2tbuCwvL9sE75n7jSQvnzt3fHy8vb//5JkzlyaTzbgwdgAmwIT57OrqeP68XLyoFy8uMR8dHU2JlphLLNT24eGjt269cPXq6++9V557TiKVt4qM4zifj31eQC4vIb0gH60HuKAEMgDpHqIhWjZ3hB997NGHH3k4zcDS0uTxj39iZbo0GQZVqXWcz2cy1nvOrpkpIaa949nRfBTgrde/w8yTpYlxQuGS0ru8NLHTYxA8ydKc/DJQKaJ45OOfbKNF+BXGt4YFFkbLzp+KMWoYRLLbDll/LOPe+XwuVWzD2hrFMdtkaVKGksdArriAaufIYxVV4pJeUvBAx8cxWntfF0eL6P0V86GoUCYCPGO7Z1APgKmH/CaB78O5demOpBOoYqx1HP0s+5HHHut53JKfAQyD1VT4H6+++SbC2HKcrUZoDDCtpl1xFoCuL36O9pnf+c/EfaSAzx+iIjKWwmfX3RF96/LrMVqVKI2YTqfT5eXpdDlaHOnm+rKtwbH81hTfpSdsr+3tbYQvD1BhtpNC44aXX3756OiImZe3t9dWV8986lMAPiZy+fbt127cuLq7q6qj6oR5yjxhppUVBo6Pjs5sbz9a69rt25dWV5eIhohC2n/nqheXl3Ucb+7untvYWNnaevnll0V1CNTjvgmBy0LXAITCioQau45kiIPwBZSEMESqGMf68COPPPbYY/Zwi2l4Ja1BJWgNK3R0PF9ZXj6zsXF+88zbb3w3syXc/L78QsaMJgPDMakQaCjDMF1mZqnjpYcey9E6FCYyR5pLHO/0H4i0XcCvKQPAVkIADywS0WQysTd7ny4eAsCUXJ6Eh3OnfpQY58kV8OQwIC
XAvFI/XzBZc186M+BsVUXZcxMt+5IikcD/LFGgH9tgTyvFz/SJmP3WNktGW6jwCAxg6QA2vEcfe8y8/CrVMkg53PEGqFUtuutH2hnb9GhKBAZDY0ZsoeFno354PEMIeOHCPFj8W4GlYVCPBcIOkyKUIET0rV/9u8aAe4fHRyczVX3i45+c16qqRx8uvXdxzj/k9WG2+lvf+lZDL8VvwyvML7344jAMa2trFDczH6+tASiD16LMROxo0p7LwNJstsy8xDxh3trZGZj3jo7yr/av/Tqv9dza2r1ra7P5nImWlpZMHWfmvZ2LEDwIkstrM3GhKpGERcRMb775FrPfGyaJqltQl4josccfP7O2/PGnPnl2bVXqXOoI1UJgolrlE5/+LFQOTuZLkyXzpDbX11w7sNebfPfFF2x3N9eXJ8MgCPBnnUxFxtkMqj/4e348U4OMdqytJbf6L7iDf4A0uhGbswurpcLkkxnB5ykgnqYaUJQ52za1FxcOh4jyw0R9ibzn5NuYS+RuxAmiy7GqDhaFsF+YOS8dsfiw+N28iKUv1FVFGzfCvVNvNGUjTr/fY9sghVq+6ROPP375ymXPZVENRMTS5RIRcPXqVVqUiohsB9hw1O1/8qTi6BMCwmd/6Edf/+ZvrCxP7FjD000VCkwGPrextr13AOhbV14P2OUDPjw8rPO5Ajdu7/zgp59S6JNPPfnB21cqUKO37IeJ5ak/aXfk+9FvArj+9uWf///+ufiDRIwS17b37SvLy8tV9fjoSL/+9Xsnk+euXl2CHwJnY4Z8ru3UPA4R7tQvCpwdhqcefHB5Oq0i27u7xjR/6//z7+fwjIV2rA3Q4makam1mAwBhc22FgCtfWaHm1+Q5JSHSEhSYToax1nvOb6hqXGKgAOajnIyHv/Pzn9o9OL52a/uxj10C9Ct/97/53A//Acr7BIFXXnoeUCip0sTu7iCGSlUZZyNBC/GoePiJT2gEda2mL88afL8s6xvKhaN4zP81EfVIXeT25gfYq/znfuFgMHC1k2pHKB4xBfU3ehAIQykS3O6WC97oRqFkQJatOqNtRwtQMgg0TCYTaWbW+1/6AXbcmGh61mIUtFhUmBwpIva53F22a1PszE3VjbNCRB5++OErly+bx+Wn1ZZTCcAaRDigogQ27jnDgZlEHCVC5GESWriAAb3voUd3brxr0lk91mWn7LIyXVpemhzP5rd29l5/5Tsf/+TTIFLorffeuv3+20Q0m83Pb6xurExMtMwM7dOKu6YfYkU/4k8f/dra2lq/dHb/xnvtLW+eQIdb+wCm06mI8P33r9+8OQeuj6OcO4cPPrAdmTAfB2cHl6AA806k/alEFAf4j545c3Nj42PzOZh5Mjk8f55u3DjZuZV+rSs11Rtxco7TD6TkEvt3BwCRNdD0tw3mmQCkcCwOtbnToDKh5ckUwDgfr93asUjkUIqokDqPvfT8t0CkosvTYXNjlb0Kr6oXBgm4gPn8vQ934ySV6v5jn18RDXSNh41NqesaiwhsB8xHJNg5cxrSTgipEBIws2W2xNik+F2YxNEy3nANiAhMXFkJ1hDS0RSp1OqcvyDG9hmxy806tOCOci6uG1Xyfu4uTnCY35IMAvG7flpsY6OaCqJt2COPPPLaa68TkfjVJ66eMgrqQKJFRP3UIYxJ8ywCPKb3Hs480cY9D+zfujZq1SoWlhWNBZC6PBmOT+YAXvvuK09+4lMx3Pw/vbGzt7m+eGbb+Ut3fZ3600fY6jtfb9/YXlqeeLgiLRrTMJxwBE5U9ZWLF3/nbHa91o39/RmwXMoEOLKLuWJ5+3/zv/nrfUtLBbg1jq997GOfPTn5+pkzD8zn7x8cqOowDLtzue/cukmlnRwC+sjq6ns3bs/G0U45A3nBg1f5H1s5wtvXTh657wJRS85FMAzD2spS29e7RAdVgVrl0tmNg6PZ6vJkKPyVv/u3f/D3/nNmo1554fkMna0vL9upp+kIZgIVEBP4h3/sf9mtdlpQZI6EPcJKAC2qYsvupxtNWalqa/AIgIhBKIXHsc5ms4kql+JGRgM2KwQiVfI5qnYLg7OBcy+BQIyCQuZzxGjdKGmAkzC+Hi9m5qEXLYoLmqRV/8bHDQ/E1abqItLml846AM/TMnrxNBN7y+OoY53Px4ceeui9994jI0xE5F1eKcJx5iqnWkn41AYcHNTgUMe5BFx64JF3Xv76MBkqWf8QFfizJsOQovr6q99R/Rfs11e/+RsGVHYPT7b3Dw+OZz1bnaycvYPVfgteW1tb9sONrT00o+YtqXf2DjPkXkp5ZXf3U8fH95w79zpAROvME+aHzp59/datvdksZVWBCdFJO2/z1wA8+9BD71+7tjQMX1d9jHl1HDe2tn7l/vsBMPPRvL5/fQuRdBD9g7WUcrB3GIsfilSzeXBjLuO8D27H9W5NyXcbzJFq1n0ixqnZouTq+zd/+NlPTZamCiwtLRHboUb9zovfDgd4xZxSKCwUpHFv1sl8Nh9rcoVGRbYffEQ2b0adUu8TYDW6Zug05otADwblhIm52J2tZRhI7Y4bi7K5sSUiFbUD1wB/0Gg729SXj9GdUJPqXJrMdKQWV7bEMuL+3ElExnEcx9Eey8wRTaHcL5uoYVkOSE7e187/p1HmWtXu31ARMWxsYV47yptMJh//+MeHYWBvk40cRvNpq1gZqiyWzuYnq5WWRc0AR02b4fZLDzx8MpsjboiyqIJhIcvHsj15/bvfTZ6keB2dzA6PZ70Fq9FS9B/zdac7mq9rt3dNtUMBrbYoNvHd3V2/8O2pp547e7bW+tjy8j2TyRLRytLSjaMjy6JBM3cozGeXl+Hy4u8vM988OSlEG8APv/eeiNx7dPT1zc3j8+dVdTabHeztpS1VVRcPSx7Isi3EahYmYkQY37m8N/pwMwdVEa2itcpYZRzrGNuqcapjrmJK7/HJHKDZfK4iULn+1hvmNkmV7770gvHb2vKSZ36CrB9VKWUoEwAPPv6Jh5/4BEU9z1CYSxTAdxAvYqU2yx6BuonOKghQXFoCmLccKYx2UOYBK18K8VNxe9Y4t7Oj6k3U1YurG8/bCUjwKpNlETL5zVcUWXqW3e/m0hwirSJx7O69uckdWrJDvzKUoZTJZLDjhGaa0eZaRcZxPrcMCu/PTgQahsHK0WxyUv3SN6uieuyxx8LJ8CwoIlgmGoBUOZQF3H5GpVai5WlxwSvOW/B6yqGUT375h+P9/jOAYnV5undwZGDu1VdetgXa/uBdUwQn8wrg3ZtbGkb++wkzf18f+ogPK+DXeTWYYdHf2WyWpeRXH3nkG+vrR8fHU6AQLU+nV5mH++579MIF60QDYKfWSxsbT91zz1bc83hmefmhc+eOP/nJX1Q9s7nJzMN8fu3atW+enFx94AFTjsvLy6oa5TBqi2mz/2BrB2EH8hQgFKP1lynZDFg1n4DT8txm6o0capWx1rHWD27vXN/a0wjtGIW3rt2y7757+VVj9Ndfecm4Yvn/396/NVuWZedh2DfGnGvtczKzqguN7i70pfJWVd3oRrNBi
rIJQBQFEgRJiCQE02IIkki/OILhF+tBP8B8YDj84mdH+N2ygwpbYYMmJYEyfaHIoCGCNEncuqsrs7L6gr5Vd10yzzl7rTnH8MM3xlwrT2ZlZVV3kTaFhUL2OfvsvfaalzHH7RvfmCfNzIIoIj9SJ6kK0fMlurT5xp7O/FyJ8kgRJDR6PFfsUrPW19a69c6DB46iZZ7nqZaIWpm7uaggq/zp5A9NoIMHJ3a7DLtdRBi+3QvggHGMZ6H0sYZ3CohTvM53VITHuJWqIm/Btk7D+0pBCmeeejGoNN09EepFy2aibB4szHqCdJzFaCLiju5248b1e6/d00wIAYjqkl1JTSbsNuOZZ3d0o0lDp2dbFs2KpWVtoqoGR7dtZ4RJNNW69gXAnVe+/OLLn/vBd77JvXexrN/9/lvISnE+03G6+l4i+f6uMT/DhM5geHj4tGY+9uyV1r6rquu6Mv2oqr/ziU/cODt7+d49BZZ1/beeeeb7zz77D2rFNP2bb7/9+ttvP/O9753M8zzPn75ypT///Plzz915++13DoeXSvlL03RclueuXr1Y1zs3b37lU59Kvim/uLjA0j96lV2YXdgoJDwSOACL+ro8LCMG4fCEooqIf+wj14ZOHjtyZDTi2h1fb6TWDWdaHMA7Z+dU/Y5Sa3n2Wvn9u6986tbLX/7t3+JnT+bp2pVDURFEt0QpFez1pfqzv/gXozDQNtst9vNuTPzGx+5n1egzauG+hjm+HU+ComVZ+7qu4UKrwH1QZ4kkX07+sD+2w1hXFXd6iyoyZPCxzxymN8BnrjvbAWwcQU9hMAmMqd/FrhjuG1AKB1CzB/e2XGFNxd2QPnZbm5lpYE0ViXq/d+/ew85a3iQOnIdVqEBMAM8zbPOE988M4Kd/7uf/i3/yD6+enmqt0r1b4xnx/bfuOzBP9ey40Ir+xT8v3//mvXVd3ezt+2fscv6tN976T//G/9q6Lcvy67/+69/5znceK4pI1+jd/vqU16e+9LN/7a/9teF4dWJUgV/75V9e1/V4PB4OB00F+PqtW28999wff+WVH//xH9danwUOpTz/4MG9Ur7+Yz/2sbOza6enbvb7tV77yEeW4/HfPz39/Xn+5LLgmWes9zd+8IPfeO65b33sY1PuFer5/+Ef/4W/+lf/J0hZMrfWuojUjKKZ2Z07dzzXPSMRY3sE0PnmzZtjvcouNkkr9NMf/zHRcvrMc8AK63CDG8x/56v3Li6W09PTOk3/s1/9ZRF9+/7ZD978wbWrV4qW73z97qduvxwJJOAwT2A8CcS4C3o3M1Fp3a6/9DmySbhuyad97mNsN+6b/X7WLJ2P17O2ecCV3MPF7d5FpPe+LMt8OIgL1ZvIIMAax9wWLuAhJeF9htfWpRuLB1V868QeQe8UT+62fGY+CtyZDOOpOUYru4bFw4oUkVLqBg1JMqTxZW5bBSPlLkwOODF6AOZ5VpG0PLz1/pkXXiDdFo/LYVCE1s5DbxjAbp6w+C3V6Rkt32TJ8cWf+RPdmrXm3gFxwRtvP6An8SwheMCrr/we3L9179XleFyX5ez8eHZ+dPiLn/ucm7fe3P3bO/7HR6/HSu+TLeoBxiIMa/extLuSueE/+o//YwowzSIu0PF4/OY8/28/9an/6tvf/u5bb63H41eOx68cjy/0/h8tyx89HHii3wT+zfPzP6UK4Kutmdk79+//37/xjf/s/v3/9zR97Wtf+/a3v/3GG28wSHZxcfHmm29l+CSmmv/TPc70TaPuHZOM8wTUsISdmdsgrEdaYYdaHK5RbVeghfWe67JGnsYd7p/9whcd/s7Z+fH8/ME7b7/91g/u/s4/c/cEUfozp4epFO4GY9jAOkspPvqpG3Tr7N2XgE9vUdzDmQ6zNszfbRd5/gcAopsBTDHm8TfsZzNb1nVZ17W3IJGDt9YiYOPeehB67DZseOP8w5jhcfBtzzwwC6XW1luUljQ32FiSNOJ1CA1xzOXhO3LZMEKdGQ0eWaTN6gXMfGAnNW2V/dddf+H6vXv3NhKesKI9QtyImEqE8z3i+okLjWsYApLlXefHpmzm5IAIdS8AVT05OcxTXdYOyK//7V/7wZ1XmKX43lvvxBEZJ5o/Qfc+4fpgOtnT6Boft97Xdb24uDg7OyMM8OLi4u7du88888wbb7zxd69e/cF3vnOj95c//nE1+966zrUezN5+661rzz77/OnpBLz99tsX6/qRN9/874DfqLW73332WT07W5bljTfe+MhHPnLlyhURWde11vra3bvufvPmze6dOQ/r5j1Kaoc1JBny0eHaYXtoFcFed6UpHvnVpJ3aDdsBNHJ9uDvw2S/8oa/8zm+7+/fvn12/8lGIfP2rv/e7/+Kf8XvnqW7BNkCRSwzA7Wd+4d91d2u23yEiW55lkw044LpDmwwez5LLMRaiWy+lKKt/VXvmZQaWaycCcHfrhqhhK+SRD5uRduVY312q1cyNfNEy5hlI24EXc3sKqa11uA+Epe/CsPR3zYw0ue67QQLunr2PxR1MQD9kPws9mTFHURRKgi2RLKlMsTez118nBC8YQ8qeViKfwROxHJAsf9xju1u2yQXwpZ/7+dd/6zcO8wTDd37wJuLc4dRoCrB/9+t3D0WrFFqMNHL+7F/4lVK09M3EeF/Xk+1qd2dd4Zi2f/ybv/k/Jd/FHi7i/lf+yl/5m3/zbx6PR/IBzPN8dnZ2//79+/fvT9ME4Dd+/Md/A/jxZfnsO+9U4BPrWoBS67XWVOQb3/nO715cfHeaMM9fuXYtPTh/8OBBrZWpBwAMLrbWiIq7e/fuCzeu0xzSoorAGLhEg1VkcnEg0SkcsnULYKUXJPvrcYc8e/XEo6VL9o8UQSnvnC/zPC/LgtHdy93d3zm7sN4hcjLX/83/6q/zTh+5cnLlMBk7/ITRG3p+XTuJ0FRFEIBHyEh2iu8IdBhLE92BDqhOE1w1HpsDbN5KhpyZ+J1QEcDhPuRz2ISkFYCwu5CZWak1sNCDwxKSeVODP5Rkom7GiJwPxhVzU6/OHhCihjyWhpkOehbhyg6XwAE3W9u6LOs0EXiw1TFCIsJe9lnZxAiNU8TTPd6k3t3dW4viJEk7Ks7tQRUQc4LUz9g/dnyhCLFagc4Vee75T7/z3W/+4J1ziNJX4pn1kasn33tzuo8jgLtffeWnb5OCHG/dPw8NjNhG/59/9s9+GFl98ns2E3qcTVtqA+7eevtL/+O/9Ld+7W+dn5+LyFtvvUUbdV1Xyh6duu8fDv/ocEAe3py0mLqrV5EebGtNRO7fv09X5fT0lBnmBw8ehPfkjIP7a3fvWrfr168PJdbNBHLjxs3X7t3jxhBVWo+aB+h4bDM7Lsd5PoQYZMQIcHPXojv6agGs9dH3w9z85c//VE6aHns/maoPN9sBwVSjtDNmjHSNho+9cOP6yz9pQXchno8dSA4+dlEeLlnihiE55rasyzTNQ5AytEZq9VDpIqIqZqKqRDSurZdSkWbx2NtsdBYBUQF6N4I05aFSfG5qSx5iZ8sEOrqSBU8q5DQivEprrVrKUHGqGnwH
tZZgdglqxfRj0XpfW4PjMM+j3CdOuIxVRHBYwhzkYEj2SdVtedKUqBYsDIMlYltjueEjZsPlF0jRMpHqMU8pHUVrwaEcKbLhiX3k+c+88dY71ptHvMTTtZOoRHO8+eCcGb+3zy6yhR1uf/ZzI/P8ZOF8T+l9t4ueMP/9p//0n/KxWV/uI1Xm/od/+g8vy7Ku67quZ2dn7v7CCy8QXylZU0qbaDytpfnqO2ITAOfn5w8ePNg/+ZUrV87Ozkgxi9AYYRJrKd/4xjckeW2RCZ4x25qIAM1XfPQWcBzmw+4siiPs5HBQlTpNGFsEgJaLY3Qe6mbrut7+7E864ObfffOdorXUWut0vjRK27XTE0+SHgyPHSJFbn7280V1oqfG4HAGkyoJPVRL1EUQ71A1TdNuBsc8zSOJkvom+4QM9eiAR8FcSKwZ4xQj7LTb27k/45TpsQnHYS0kEowiv7Cw08YsWeJSJLq0UFoTyAHQuJ2nKapgON0WUo08ULdWwOk/0COPewiQrEu9dyavW++tNWa6zD15eQcMgMsrrwVjcNjcHBjZepNoU4czzMfwPKuSaofeM0akAanhP/GZ61rqMOTgnm0k/DBPz1w54cH+te+9Vad6XPvFcXXgxc9+zjKWto9gPVaYnyzhT/jryCGFOOnIvPqwslT0S1/60q/+6q8ej8fj8biuKwk9Pv3pT1+9enUg53RHxD++0SN8GufgXnQBXLt27eMf//gnP/nJi4uLi4uLEb9MhIJSIO+8+uqdO3cQuk7rNGrxIo26KStn+exDBzqVWO8djlrZPr4A8tabb771gzff+v6baL1dHMOOEyCf+bOf/ykaay4yzYf7S5jch6lOtfBYAqClQJUjXtb26VufjSBLvoF7aNR4yS5QhQytjUkb6IN4bEt+U48uhNzzC+0fkgS4lyQzGXEAibK5BGNkCpMTRtFHBg7KNt/R0BNhZmZZUybfdafeo+ZTUvUhUn2EP0XU0ZwNJaIMOYPDWW2YsWJLpnlzJ+qgm62tUQZ6azQOef5Ruqx1T4wV3YMxk5noN+7i3M3xveNoZLFVWvw+3oztxOm994998jogmkcYhBpaQUYIfjimWC/WBgjcb7/0WXfvvX/rW996gnA+9roksZf086PyPBh2RtKy90ZSETM21LD/4Ff/g3Vdl2U5Ho/n5+d0nq9fv/6pT32q1jpk79LXcekf5LX/0k984hM3btz4xCc+8c477/DOrbWXXnppTGCuhXPX3Lt3T2lbpdbcnRQPbQkze/XVOz62inEQwkZAvXUUBWw5Lt/93vd/8Nb9N998ezleHJfVM93IbfnZL3wRMAFe//YbcL/7ze8xPPCRq6eT8iDQiL+YuXW3fvOlz914+Se5FZDx7+FwhQ0fyS7icwnn66233MFGI7/HY7u787F7RptJH996X9s6GlaAfD0Dl+5u3QbJTgjH2OGUqyClyD2fup0fB08TJ65pLEoAHHtv9bgsIihKAssgoLFxRzx0suaGkM2ViQPXhlcQyWmgt8aIJSV7WRZzn6ZpXRYGoCKUHIx+vixLhEYigBxncRoEDwW6+TQ8jUwELUZuo8lNPLmPFNTn/+jP/t4/+UdRp97JaCQCOZmma1dOiBl46/750tobb93nkXnzxZe4rUfB56NSOvzMx77h0jsf+/qlV/7RP/pHf+SP/JH9kzP4wI301//6X/8bf+NvhKuWmdtpmm7cuHFxcfH2228fj8dlWUopDHftJ21c8zw/++yzzz777Ec/+lFGtpmj6r0zTnZxcTFiSOPjzG288sorInLr1q3j8YhdU0sfZJr55ACWZUFi8eEMp1hxv7i4mKaC48X9+w/gaOvS2/TWxQWjaFQXdA0CPSoCOocbZgdT1djhAF/lBL/z4Hw5Hj2dCGSVeimltzZWYeievP2e8jawBrsnD90wbApur87ccx461FIXZoxmITNMnka+pceXN9ierYTxyTlneWWIQCmlvcuT18h+M5jcsRPM3HPx9KMARYztBwEnUHYIE8IaUKHXEYElqt/WWrI6+K52MMLFr7/+Om8z+rJy6kqUYnkms3YR6cRzQ8W3Ms7cq/IQs3bMo3VaGu5uvbnB3V3wzOn8+wCAH7xz9uDsIndIMPtZ7//iX2zcSz/y61IS2D2avZtxY+xiocDnfvJzv/zv/fLf+rW/xXmotXp6v9M0ffSjH52mqbX24MEDd6dbK0kfQUb4w+FAvGRr7e23347MpBkV+5/8k3/y1q1bFBsnjePD8s/q2Fdf/WoGdrBJrEuuaeyWnvBsKjxuOz2gtVaAswfn1juJr8/Ojz70QZKZmdkv/cpf/r/+H/8PAO6fXVws6w/eOQdwmMqzV08ECnEpzEdKSrb9G3/iz3DUGLslI6+aIeic6TigRJQulY0Wbbl7kft801Ui2Our4ai6KxAAb6sjPpeyH0E+RtSAnQoE6DBc2gbIRXffkIuX5ryeXjllvMbBhlHFM+yB3SiHtEjEw7Y/9a0rChBHl4yYvkB4rpMUdp5nUc0itXi/AGT03ZaQCYCNbiPyUXE6agZYkIo6o3kpsDHCYVWKyB/547/wu//dfyvODqtDrmlWybXTk/vnFxC5953vnx9XAAL81B/66dYaHvYt94r03ZTq/vUn/8prT1L5D/7BP/iJn/iJmPWYcNkl3fDpT32abaY5G6enp8MjGkf1ycmJql67ds19tKiEiFC5nZ+fD0OXckLp/eznPvc//0/+k9fv3XvogQEMhOQggoa7b4ziY56HNub3nV65Mo4eSRPp9OrJyTzXK6fW7dq19bis9ODM7erVqwBaa6VWd2fHJqr1++fHH7x9RqESwenJAQqYILvbcqsc1/7yT31pd95kt+yRnnB39hVgt4Bsj3LpydOCi+1FZfCErc5P9Nbq8djNpmlirtR3O1J2KfTc6tbNGOjRVOzbPhkbLFC1iCBuIceguXudp9nd196LSC1D3UUHVFohyPpPHmAMDvPRIVKGKRvOUo4mXW0AolqIvqL2G2lbILrDVTaRwP68H/75sIY39pO0Q3rvBidXtJMJVxKeLr7lAAGIfPFn/p3f/o2/LyKqJckTRURO5mlAAii9AF7+yc/Te6y1futb33pPI/nR67Gy+uhfLwexRIKIywLrwDWk7zeCK0NJmlkpZZ5ntibLeU1y34cfYFi8lvWJjJouy/Lv/+W//Ku/+qsQvPjii2Z279692EyMO+YZyhu2TjInpWkz/Ihty8ax70iGYE51KeXalSullDpfeesH36+l9ClErDcfz2xubW0AoPLv/epf/Vv/+X8mwO997VuUuueuXWVsEyWmJznT/OOfuQUiNwHmEUvW5VNhEhmqqrWUcMuzbIZ+qu/330inqipQah1CODYPs1DUH6zWujgeYzaKeo8EGwdZtGyYYIfBa8aiOWOSaWR3rxnQktQ0LcutEFyRvWpRN59ogDiWdWXRb7iyEjEV/trJzhcSy9PCJcDYAID0EAJH4d7N1nVprY1enjyxSMlZSpFkorT0YKl6VSXzfRg7VUqQeCklFl6ksrlrt76uDQFKyYd398z
IR4rZnZT0koRGAhymevXk8ODsCMH5MQqAX/rJL5jZsq7f/OY33k0anyyil/705DcPJfzNb37zN3/zH3PsBGnkcscRPlA3I2/EhPBAAhKfPMKWGbwM6WLclOkoyjD9/De+973/9u//fRlGpvubb75J6w5MLCGsnk9+8pM/8RM/QdiTua2N4PbgjOFZyz1j7uitWyQ8VWWeKuCkXJ3meV1XGS22ES1GuEzRXyYaasaXpwGJqVbziJ4M3i4H/vgv/cq+8UprDbUScsswrXTRWSPg5ras6+U9E2V7MkqyJLjo6X8LoulQGEVOd883MMI4H0tkBEt3a+tatCA7AVs6FILkPC/60IY3W61zKQlGZA4mzQRvvffWagBKXCyrajOTM9rthAmxqc0B6sgjKg+jjF/FInp37+t6PC4CUD9YwsFrrXv+IW5wTSa+kQ6SUZ8pUooSNDM2E3vY2qA4ERlxWB1wTto/gABf/GN/4nf/8T/04UDwXqoAnrly8oO3z9behrX64sufiwhhlmi+m+y9m4g+WWLf7fr93//9f/pP/mlsQRGkkTze8Oi5MHDjtJ8HfL2Uwo3LlIynYOw1cM+K6tdff/3tt98e99zf/9LPIvKpT39KXHi4R/LCAUklnDGh3hrbvW9HCLNkWaKjqtXDJ1SR1R1At26+0aq9/IUvjoG7+2Gerp0e4K5BIgsVSCkAluPKjTjM/lKLw3t3Dt/cSi1wuHrvXaGl1h7llh5aSYUNxHrvUgbBJre10pBl6d5QclwPy4CfCMydQZ/EFKrOB2ZhRUQJL9lZy+mi8pCCuGspHszwrqLmXkW1hpD33mspgFeeYZaWQx7YwK5mSngEMYC2dW7UYJDLPD6QFofvzmF3ZO4XyNwswKqpcOwkQGnxZiE7D49k9+1shIHh7kiNIBCHGbZS1YcgeLwHhIhuQIs+/8KN73z9Xn6vMCjBh56qrC2Dg8BLn/s8zdR//s//+VNL3we5RhCLSviSRQ3g/Px8L8N7iRqvDPn0JEnca939O7EDeFCMH73bY3/ermFGImKqrsVz5rGL4jLGBhK2qIjo1dMTlDKdnAJlmufl/NyZRwCqSpfAGLM8hp/93Bf+UHwrQA137fQAEctvF4ko5sev37712c+PPSEUueCpNaAodPihnJmqilrcRv+9jMcKqoyHH2dPFlemQMvuuUTgQWM+BclEyFQcb1GWGzIltRQqOTczQwm+kocfXrx3g1pBYpnz4UWklKqttUxntdZWwEnLKen/APH9lIRMOlPnl9BfGWERiJmvNFaNLMx9nGDTVLPf+KbKReTWrVv0AzRL9odpIul4Z7wvst08+3uqkZFMU93AHuH+7x4ejuc/fUOSIJtauLdmvT1zcqBlwqjNS5/7PL99c1ge3sqP39lPd+39gqd85x4vjV3Q6NI1LOdk1X/Xr3j09eeee+7JjzT+9M1vfnPke3vMPMNIOhYLABx3X3stsAvgFpciOtB701TMQ++piLu1zi5kIOCCPkKpdYiMuz/3zBWqh4wiey5c5HL4kJpqU6Ojp1BXtdaC1dgsUG7dRBDUM1sRe4aUIsKMjE6EZxJTwnFB6Eq4Q8kZUIo7M6OSLZVld4uSuE6UgCFpGhyQh6lXaymZFSaEsfeU+m6mWxZvHJzuvfe+NVKxAGdmlRDlNm451B8Aic5OImLZm6utzd2iQwriGNOdMuEz37x5i2sxDrY9OMaTmmQcty1BDr4LiIfhzjyEBVYr6jbi4fGJz9zIHMB+DwDwqycH7jPkJNDDfBoUx5PF+wlScbmQEHjzzTcfqwMf7aV06XrUXN+hd+TRvz76hE947Es/D1QmIuICRkkZyBgG3dBg+YSY58qgZXZco+YRB18cB0OEx3vvcPuV//CvxjM4njk9maIOkUvKocna+8/+mb8YMrxtHOvmApRMZREgaD3xUvn8o+mPJbFG+sKxrZBUWxnCwpgSWpQiyF3HvocRoUBGeR/d9hgyxLeNv+aLfJjCBJJ5RBw6W7GpiugAJAotox11NUW3t+7py+2tMs9uMfxi2uus9Rt6rLXmbkFMPUhYkr86PivovS3H42c+85meJnejVg30pcXog3wjfEvOgu0miMPPcsqxgjt8EuT5F25ZgN24FlxEFNXnrp1OlWFN+bN/8X/EY+hR6X2y2nxKSX5PJfzY18/OzijGP/ZjP/aeT/KeN98dv+/jswC+9fvfGneQpGjKxIh5cA97QrC2+xRRdy+FFQgOeJ0q08eUnFo0wsUBM2SwKfwdwA9zCchVBCYUEMuY942XPufuhO62PH9jkZUdkRMjBGfArGTImrvFR6NeuGebjjEhQ9RY+ZMwbE+sVch09q5hmqqZ2yY5j5yVjCCu2XmMznNL5CM48qGQaUekyhHZE38EP3MEafk5dm2a2UUCTnbC7p2RTEmNTeRz7y10XrbHsd7NvGTjUh9wyy0sFAGmUmsp5cUXX+T8cgm5yEhPdsxiGm95fNBsU3VHDxAuJPjGyjRNtdThIsL9Sz/778jemGFZnChLC/mdX/3K725r9n4k9od829N89j1V8Qd4gEvDfE8LwsxadHtkeFRFJOvWIq1CNgJWFIgKFYO5NWtA4zzXqZo7+Y+dYe0UhlIKHG1tbv6TX/wS13+u9XSqxBVa79abwyAuKh+/fgsAXWitSpM9LbCU2QwBdLPWo5yGARqJIlsZm6fuSwNEugU+rFtz8mbwjOKW7H1TS+4iUmt182VZeutpogyxT3OJXEHJfMC3lVrJGgnBDmW5XWbWrPP5N+pqS+0XS5Qat5RS6gBaFwd663RsPBNc+R00oby1xh/WqHSrnkaX5UGXzBJcOSc+2Xp/4YUXrJsHnz3D+nEy+QC+afSYGfZH2iK2e35JaAoba8W/nhmOMCUyZCCCj1y7Mk81tyzcvVv/Z+9VRfh+r0vC8KgJjcwMP1bk+OK7QTt/VE/1hD/x1xs3b2qe3Z6NJuM3hE3JuMbrr79eatTSzNNk3aLUDA4LTDszOjyY3XxZ11SVoZn/z3/zf8dvP5nDXUTyAYR6Vv2ZP/0X6PUwcSApI3xp+GNgSsxRJDubYQQ+cWn/cJ8U1kGVChHSpCI0b3DlJWdJmCAjt+eCkbTbnA4PXhrPhDkyojaeP+Mvsnv+GAESVsRq+aSBZytUlRR9HkyljComLdRjolJK5XtLNiAfN+Gj8HXWppWIhtuohzAzZBe8ERZgTpMRkes3bmx3wzibfLM9HMO1w/hq1r4wCCFx9IyITi1RMgaRn/7ZP+kbQmu3LQXPnJ7wbnde+cpIID15Kz/lr49e70snP0E9PuH98tDwnvTtjGC953eNX7/++tdu3b5NuyZyuDzrNfoWSvSjUgD3Xrv3ta+9Pk/zyVxFtE6HcPdU6zy5o0jQuE21iMhU6v7J13XN/SDPXDmlakcpo6+fiq5re+HWyzaYmLINyBbu2CpiHI6MwrJOwKkPak1WTUa/duVBpZZpnmopTJ+E/5kdvyRTSZIbj89ftJg5oTKUzDxfTLKOXcM8FeyigsPvCA8/1Z6HKQG2FFQRtW2FMucSLo0X9q
wa0uNBfh8flTCKGAUeWTukPbauKwR1noomoeaOCaGycJfvt4SzpU1+/fr1sAhkA6qNUDO901GfmPIb4c70hx0BjOH65SDMADz/wk0g0vP5IYFjnupci8PvvPJlarkn82A9zfVkeXusBh4vvpssPfPMMz/kU73b9fSq2Ky/cP36jevX+zgNEeBZ7hDPYk9u77t373K/lSpIiZyKmvWH8fnbNdruvPK7vw3gME/zPNVaC1GKAISdO/Wjn7qBlB/udcuLOpdr3Htf1nXs+dbbkHlEpkI3EUgdS18udwm8m+94KYKFy9OG3V3E4bTW+KoDQludMA+NUkPdB88fbrNmWQYw4vDLsrRME/bedR+vG+vE52AkMSrEGA1KZ5qC645wREW2Mz+/hrG4GjzScfEn3c2y774XDAiYmdkL11+wniaxRzydwzM3CJigik/mh8etPOKIQcTlWWTIj3zi0zf4Pu6tId0nU52myt31d//O3xoRrCcL4VPq3v0B94S7Pc0bnv4j76l7n/CnJ7zzzt27TPxAcOvmTcuq8u2z5sMANiPk2F5//d7Xvvb61hrROt/XerQssoGcldBo7v7K7/4W7/nctdN5mrSUEaOhtvRu//af+2XZJ2UjFp5VscnGTp88QlZw1fhG2/D/u1i9u7u33kaR5IAGxR+Zz0sZQao6fpqJGhop7NguAFLrOrC25mPzhm8Y3ANDgFnkOIA6EJnmqSQoJZK4/GMJ7tmg7XFEq/jj8UgWebJ7IHz9GIaOzFhyrvNx13UVyDzNOjJnQCk6TdNhnh3onLL0pXtMt7WWlcetv3Djes9TKEcApNGiWhI6Hb4Ev8gJkObMuvfel2W1ETMXAfCln/359MdDbfPOJ4dpnh5Tt7A7gp60s59eAJ7yDZfe824/f4BrP6LH3uo9xpJxGsYRbty8ySCFZO4qVsGjqpb5CHdMRb/92h3Gcfkgcy0KqSU20VQDO+mZf/qv/y//JyAO8ZN5BoQBTBFhDf1xXdYexW09dou32Em9rckrQVaJ4ak7VLTWCgml1c2s99jLRFn0bhZh7WVZ29oSwZfuq2Q9A5MkO7KKEUJyd+tkEUFwmKgc5nmaKtyNmRRVaJr6tk1ab721TuKArMb37hsLR0Vm0xxMBYVtMHJWpRRLLBZyW6fz87DeixmMkmhVLbVKnqbjsIi4czrBvnmxToITAAYvogLcvHHj3r17UB0OQyxlRtkAkusPr9k9HX0ZYb1S3BEwmr1cPrwj2bPmx5+99oO3zx24d+erKzYn/7EfesprfPzR+1wyoQnGYip47we+27c/vVZ/mod87M+P/dVBrkUEfg64desW3F+7d4/vkX32IzIdCuD0ZBaRb9+9a2afvHkDWpa1IVhcTBCgeo8TedN1AK6dHqh7DUYYLNzhdv3Fz9387Oe5/zQ5pRDMz5nItSy5AJA99CTNZla1cS9Z0iEG8CqnnomYpGEPA08kQPWpX3gyYHwV63zXtdW6zvOMcPHCkAw3kTMmAhV13yyRJK8ekv3oEJR2LzwIBDV4pDSye6WQ5mij4ZEUnvxWevCbvR6IXEzTVDZgPc0H87Q0emskZ/DRzIW+B4/hUmtAskJzjq0wjlJE9lzG5mJQX5P0Y0u+EZ+lSVcLAPjpn/uTYwh0RnLf4MrJDPhrd159T6F9esP1/b7hyR/c//sjuT7ykY88+RsvP4DZuixra2aefpw4cPPGDaZ/IUKWo2ESXzk5qUUO8zyCHd++93pb122V6Dur9N4peDTd7nzl9wCczNPVk9m9W1thTi3JjXO+tNTJiLukYVhLZRfv4XmOqTOzdVmJixBRhsQ4VJqDQHhtokJx0FKHTGBs/hTjMRAIWD9MDczuELSi81OcyjArnOJAs5FD0Lilsj/ZGMJGXxXSxIolgpCpxlVGbWTQ4fGYM0LbzC0h3YwwwR1i0KJwdOuA9G4iMs0zZNtqvRvFyN0ilLepzdgZ7hjzwA+q6Iu3b7927x6faGA5uNT8lGRiKU4+eFF1uHgURcWqsOjEwXLzj3/mhsh4hChrcbd5isJGCH7rt37r3bb1e8r2D/+Gd1PC7v7ss88+JU/17sh+zK/v9r3v+aevfe11UhENzCb3kxb9/Gc/a273Xrvn8EI3J3oD+9lx/fYbb1o3UcDQ3b723e8//8lPl6JwhwogVaCtpMrUu1/9Cr/4x569JqVKqW7dex9BSynlZ3/xL4aWikDapq50V5Gn2W0Ho4S4FJ41mjOT9Tq0+ABEvHq4aUP4+D46xrLjlBYFPPrTubtApmmmOU+kU9HiHtYufQ0kCkPSuoTAxSlHIxKOQU+ZznYXVDb1FewlZ1uu7ZCJvi+KbBPjDgU8mDYBj//LxmVkvkXvHalXc4xRdZzeaxwna2vTiC4GiCx0/q2bN1977TXsrOI4xggbKMVj9HuwW5hPzqFBoE5xV1Ezf/4zN30jxAn2UY/SwvnsYgHw4M3v1dMPK977hGskdZ4gw0++w7udDnuT+/26vg8dBBAWKiiJGoM7JUSge3np5Zdee+01oXHnLpQZ1eOyuo/NAjd7/fV7bn7z1i3hlgJUhYrdxX791/6L8fWHaQqAgASaT9yXZrc+9/ncTj4UBvtIpasI2zluIlLSLpDBgyEQZPuYLORKlQoAmbwOZWziYM1QHhbmJlDYSE7yu1QLtKh1W5dFMHthNWtkWwVwlvU4KzfpmjoRV733aZ7HUlHUNBJXIiKRcBuowhALBxA1tyAzdA6ScoFsGEOOXN8RIxH/VQMR4Q7JhnUjSEiIhVugpuiwFMmDfMwssxF0Rl586aXX7t4Foro5T1N+yfiqMQo69JBsuTZ2DMJnd3d86Wd//rd+4++n14WssMGzV0+/++b9xwrAj/wKHvP3f11yjN9Tn1/67KUfnubX/fXqnTunJwcjyFx3X20+UhKf++xnRfX11+64cVk3+8581N65uwHynd//xvOf/JSTyRnSemAeuCgn83Ttyom5eWcQx8Hd3P3jn7mloqLerSeP984ucAd1rA6GmK3bfW6fbF4v2ygkK1vB/Qcxc4QrJ1S0ttMlTsuUu4uMQrvMzlSnYz+21uZ5NvMQhyyMolBA4CIbREeiAheDID1kIkZBMa7dXQADSiqu1ORAlmuDVYQQhWipDHcxhYBkZ030dvQNneqkqmNbHZdlnuahqiWzAClRccTE5ABMQMfj5CBv375997W7Ch02EgmAMEoxwWoxfgO9k7g5y1z4SCoCKY4h08JjTzJmKMDVk/nsuLbzB/9KNPD+ejcl/Ojbfphvee65555S945fP/qxjwKI3eIdUoK/f/9m0ed+7I98795dAokCWmSdWQ+k6DtwPC5f+9rXWu83b9zkK7WUL//OP//ql3+Hv145PYybRr4ZbmZ/7Bf+fLMOmocIGpPxxNxRoNosih0lQP4Vumuym7x8UaJsgNJoE1ERowPo4I4yM7qm5IRkEBcC653qhKNQuIqMplbM9RCW3HtTWvIeoVwhL8KjowDgLmHvbKNQxoSZqx38n5IFg8JQRNGAtuR0uLv1MEmK6jRVLQqR1qJz9870A
BxTnSz5Y1PDu2csgSqoJLolI4RxKkquGgS3b93mDd2dETgPnpGk8uDUuHXriJ7UIiKFzZ1z3TglX/qZn9/SCtui4tmrp3jYP//wrjnb+e6vS+K0N3c/mKA+WT9fglBcsq7fSy0bwOQc1Y5Fq0EMt9Q+duOG06ArKkVrnSqbgMKZgVGVY2uAuPu91+/VqlOtWjQ40N2fu3YFSILmqEAoRWs3u/7SZ3lQj4ODe3hdV8lCC0Y0kdxp24GIbVbpSY0YeO/dLND+njVS3HXcnxNTyhLG+YiYelrtIuAoaqlMoIIMIfxmwM2ieJZykUFs7npPxPFwDCNsxOB/5om0qBYRwhzDmkemaASUhIzdOeDm1ntjvRiipXAwkQPoralIkG+4Zz69DwWrqgOuRmtfg6efXdQgQvWbMz72k4Pg6heuXye5bsTWNv5rRvMSSTeCU9sCkVg8LpZ9ffzTN1hitruPisi10xOOt52/8x7C8aFd7yar7v4vDYn1hF9/93d/F9ZCVr1TVuPXQCMaeuD4n7956+PXbyPjE0zC9NajWb35+XFxRAnh1772+r3X7wnk1/7z/z0XcqplKsUS627uvTe3/rHP3I7YdRzRGeNUPYxNBWBzD4fJOnw+D/Zywu09lIEPbxNoa2N7St9q9ONmAbdIAYkI/CCWi1wVVHU+zCpC9g/rFqLUWreeJnwUS+tQn8A8TSXzYQAS6rYNpCIMM43DJz2BMcVxmmT7FQIkVERrjXtkbntd1kBKb2SFHIM6UMj6lVFQyyA7JzhmXYd3wGcIqk8Armpw5qNu37p197XXAO+BBJHNl0g/QHYbLnPP2SU9JxuO51+49d1vvi6Wn+K9XJ69dvqD+2dL9rb/V3U9wX5+sir+YMb200svgO9//81vfvuNmPG0yzxJWvgid7yozlM5PTl87DMvfv/rd5p37+bA2w/O3z47As5eB+ooAW8G0O7evQsAjsNUD1MxG/UbkZWE+7/9S78yjLWhTkcgCuw5KPLovgKiDo55bNmjmgEL4rgYdSllAKE88i9xoy3Om3Ey3oe4JmRAibqp1Eqsc82WTg4Xtgl2DKnhpfs8MJ9dJYSIn2VwuWehRsaMslo3jqBAWbkTjRXZLdLhUIm25FUa4SvarFS4knI7psOSuqmyb0zRdHp3FcPbrskwtZvKxuVx6+bNMTFUrSOQJvm2LSxJxZr5gICaOUTlky/cVDoWrCtErrNgqgVAO/9wo1mPNaH3WdknWLP76ynf9oTrfUmvu7/ylVfEnPT8Yobe4z8zX9e+rrauND3FXRwKBfDRz9wUwgpE3j67eHBxvH9+/ME7D7hy4Vq5q8hrX/3Kl3/7X1DcTufJo/suEKHdsnZfe09/zOFRfKfR6jcaCGoCobd9JSnfEWfyjG+JBKQpDPJBGObJ0xj7KhVsyRbZiLaglvZzqEFgvD36Ibe2IgKxJYAYNPaGY0m8U7RWqrUUJqLJpzNEiUdSBeDwRDixNd7IU4e7yFNHSqkpX+4k5wMYA4tCR2P9LYndeWBSdDzDSFR9oWsFCUgvQ2HGPDvcfVnXkuALM0PWPjYzd79+/fq911/nMDxV+j4XL5Bk8AKD19uhlAN5/vpNpua0qO0KHp+9cvKNHy4s9JTXowLMLmfvpnuHM/IjfAZ3f/bZZ/e/Xvrro+/nD+a9SIVnATgPYsBVurmZSWGAxwF0b0AHWLjr3757Z22doqSqDplLgXTmJgp3LQDgI9dOp1oTBQHAu/XievPlz998+fPuMLKux0RBUCAQJ14w7LL91gLQyDgf40HvneXHPQPk3CTOZrdmoqJaAjhtfTxhSaZIIHjjNCLFqacFxQXuXaSW0trG4M3noTfKR91qBjmcRE8VyYFkcnQ8uYZ2czd4b33ku30gxQAAqjoVYrIGIQCy7FCVnY4hlF7efIBCGS6UNH0S5BhSmqvyUJMOh1Pl8kOS9ILWrfUWkSfg5s2bSBZS7KIvnGIeEu5u3YYDJIJsrBbP8NM/+/MWWO9Oh4aD/+gzVziQf1Vu8BOU6gfTsU/z8fdUvOMV2mbRj8jCUgq0uajDRRUuBNZ3t6nOaf0J4D/+wgutb4XZcFdFSbsJ7v+vX/87/KJnrpzi4fOK2vLB8ThUpiQgmRiMtJKjdSicvHaBxkB2/OA3czdsXZ2ylskZKI2tBes9uGjHJNBWTVAHLVhWGGlagMhCy6nWUstUJ3esy8rnDjB2Pr9v9Vui2XiQXzR891F3QRBm8p5nCaW5td7Wde0ZgkNs+vgx3p4zwpC+Ry4bpcYppapVN00+T9M8z+ybGFGMdEVCsjL+jPwDDXB3j5LF1q1ltGBYJr2/8MILsrlgI9ANZIudHUVjJCB4IvA2Did0ljxOXAla3Senhzl9lQ/1eqwVzesJMnzt2rVHX/xg17jJ+7KiX/nqq9ztFrTPcdjT5ixaIpUHdzcFylSADlvRjmjt4vx4+/btF2/fksRUeRhk4ruvOszTPAXsAk4b1QTQoj/3Z38ZgECmWud5nueZHWqTyCfOdPrkJYEDvPPYbL2zu0v3vkVb+dDhFUbAyN3du3mPJgdrW1klEfhHESVj9PDBSMVuFtV/KrRPhTHj3miTq+qc+KVaCwlw5nlmrXJseEkXPj3BYaLrzoVImx4BE4nyohBREAjW0uvg2cZ78eQCZPj0mpmrwkr6mgcAoc7TFITdTNoCcOep0dKXBiJCkMlrcCQjvTT84es3bvhYcw93KAKAnNkyQCF82nHYdgBf+tmfdzAPEcLPs+R0mugG9/OHOvp92NclWPITZPgDX5cscMnz792+911+pdG65W+QGhYCY/yR2QLzblbqBAjxulCHuaqolhdv35Ys6Bv4/qJy79VXAJzM02GqO1w74DCzi4vjrZc/LwCBwsJODapaSiUHhYYPHBtMsCwLMw7r2hB5Y6o73c+HjNM+vpBuPiNW1L7R+3c4pRKV50WUkhIKfOAxw3cryp3fu11cHC0ysdvYSqZomNwptZRKRhkVZZQu+MPWdTWz1nsYmRk9E259QsAlGMOcTVDTtkU+rgavutnFxUXvvVIs2ZfV3ZPkFRqEejKKI8bBMPrfwUWESSDOgntkIziZ7j5A3kh6LXM3NwFu3bwpEEm+rzDR6T9wSiSeqmVppWd4T1X+8L/1p2IKNdqEAzJPdZoKHJ/9/Bc+qKQ81fWoBmanqHF9GDJ86Xr/0jt+oH8YqCJyuEXViruEVvVaS5pcAARSmllGG/X6jesXy2pmyWYMVb318ucAP5mnaZqiYEIrSw5U5BPXb2OEphTIKnFyx2yrmQXktGNHvrCn15oZL8nYUzJyiGQ55FZsw6BNGNFBkzqY3+mWKRmaw4yXMONj10KA4Mpe15Vee371dvbtH5ietmQ9Dv9UKqEgZr1nWEo2vGHJ7Dmv1gJxkrf3NEsiiNdaE4CqfxySnllfAElZoqrM9KWazAi7E2tOjvz0Z0UkqryiM+oWRWTYDJGjj0V/8cUX79y5g+xSwyFcGkuUBKuW8Ih5TggDnGWaRcSi+h+AP3N6ePPtB5/7/Be++Eef+dt/57/8IJLx
Qa+TkxMR2ZoGPxLHel+S/Oib9688Qec/4dc7d+6KC4Mrbp6oOrAgTJNPx+DW7cGDi7Ozb9KYYksdZRVeavBPf/ozH7l2+vrrr2eRPW6//Nk7X/m9Z66chMRAVXws3M/92V8BRkYFI7TH2EZ25OZbiMTwyFnyBO/dGUDmTg6b24ePDkToi/7oaNfSrQ+SSeRYfIeaVJV17e5eay2iPQBeDAWBr0/TFNaGu3h2aEir+LFj4XEgHjxhNcfCbFNR1RGBxZiTbMdWaiklAIySeowMgwDWdXWzKUp/gcx8pYEFsIiyqEhhLmH3DdHHfSTBJNyVCBe6kzpnROdiD9EUr7WmPocIbt26qVlgaA/t8hBg0OKK2o6oIu5mX/yZP4FwsZr1ZtGgWU7mear1sz/5ebwfaXm/1yUNvK9kYER6/Lp/26NYjrGHLl2XDOZHr/HBMUvv9qWXfu2WcR0kFSjhFFMNIsTCmlQpRdjXvVt3QXdLRG1YTAzD3r59m/YsbfqTuV49PUn9RBiHuxtLiAEgOrAAzL4GnyvPjkvjdk2SK9lS1zEi21PYJB+VpmatIxwbezUcUXLbbnQZ1G378qH8x8zWtbXeudvnaQKwLgsYcEVoxvHtoVVzLGHe5FqamarWEg9WnNUFClUlBx0AN1/XtfcmQA3WrUClkXWAZi0pAwyY5pkaHzk3PGCwYxXq3nv4Bu4WuKuqZdQ68AHCaA5eb7u0L1VVax0e+m6Ximq5dft2rJaIJDDa3MkJilFWFr22dGSfn3/hpmeyXhLIfpjqVEtR/Xd/6c89WQZ+hNclg/kJMnzplfcU1Cfc5z1v/thft1cyMlRq5fzS/8p14TYDEPW6qkHkwKWx1gHv5qwefemll0opL37u889eOWFSQEVJv6qlQORj11+89bnPIytqiAJI3AiGPhxPGFFhQbqXJWoBABnFs/sLDqDWqrvWavn4zN+Ks0BAw5e2KDf03lqcShDNlEctpdaCoGB0rUVIqWO2NW1QhQwymZBBdzdYOJkObPAnj1hN5nKJFI2Lby5EWpClNkPNm4o2s25tbebOpnj8yk7KXI4wqSF5CMGgGq0mwv6nRs2uCyVMjW1/UEUjoyUQ8TRsNKiqh5pHmu/RDGp4HXAId4CmugbGcEQE7p/49A1RHTY9v3Ge6lTLT37hpx4quPlRX4/6wE8jw+/LhH7P6wNIL4A7rA/LotDoRZU70tyjKjBSCuqOItVBszHsUhWt0zTVSTLo6O7Xr1+/9dLLDtSi5m7We2tu3c1F9NbLn2dgDEk6hdg8WdAGkCIqcYix2YLvQbO9S66qDP8sOlo66OohoRBp02UwOHRSkWJJiEepLbXWOg1Nxh3Ze8cgQjYXCPvpHo/H+DqHmfF4sSyfihPZwKh7qaoBc5ARta1m7tmpIEPHGTlCxO/c01vJ2TEzLQr3dV1E5HA4eIqciLDchHcLAIZFdivek4WXGcrva1tpRhQpomIs1tg6MHi0dwDcOiAZ8t4lCAmXNrtx48Zr9+7pNoacxohuDSL/tLEdZvZH/+1fmGs5Hhd3m+Z5fOKLa6Bk//wv/bm//V/+V+9HKD7gNXoU7Z3efQfw/Z8+jOvphVmCASbUakixi2kquBGkTBPTdtSII9AoIVyb7Qrg1s1b/+n/4n/59Ve/4u6iYmZtXUXL6enpT/+JP8M39812ZbI/zO1MxMJ96znML22EHyP0MRQYbQYEbp7+LRumuUFhpvmUYwbGcKirKM+QePisa9xypcMoACCCWqegmIW33nkMjeFYPOFDw3F3k/B+hxoiKVmMz9yRLdhHfpaeek++eQz30kZ3c2VAYkDhyAwGBtMg2ByWTWboDCDxG6VUDFeGb810LoNSo9IIyT0QONJslsHnF1W3fvPmzXv37j0yHP4Q7PM5lcHRt7b2C7/0F+jWXLt6VZIc+OK4vvXgrJnffe21p9j8H/B6bFXwk2V4O4B+uMsfLo14suK99MqdO3dfpM8SeT7GOwYmITLqQGRxoTqwsTzIqXbiYKUgQbob3Ivq/+CP/czP/OzPwY2+1vHt+90cpf7+G2+l4erBSQwx7yGE7i7ZQoXKd1czKCKl1NyMoU8ZH6JqvTyc7HGbw0G8wYGglU5zHaC1j+HOpClno4UIIZlgREB7t+W41Fo9/+RObsD9cPDQcEA9KmT2qHTuWSO79wf28Z/hhY7jk6faxcVFa32eSxCUuDOCXyQsaj5x9w6gJwssb6+ibt6RhUrIqALnR4TDAzY7x83HssEtIe3o1kBOn/Rqgojntdcy3D0wX4BdjtlIEprcP7+4OhdwA/UIrV05mR9cHAHZ27Ef6nXJft7L8MnJycXFBX995pln3njjjfd7w0d/RUah368VvV1BuuQACJtvrSG0G7JAx1XLyK1STVWRWkrvNm5to84MmGoJgdGKqnCIlr5evHN23noXMyVsoRZkJQApk7p3c5M8ukMXqHvvMaV86M1+1lqkh+Ho++FIMjEi4MZkvTMYuGdUJACDEpwu1E/7uXpIfNKvBDBN87qenV+cX7lyhZHqpKPb5BGxYS10KW3qbqpKsdLwHTOjxv98JFLSFR1jZdKvm7Epk7uzbktVKL14ODBAo6J3W1emYC3YT2xDpQ3zO8bp5NoehCYyMN/C9BejY6lFVaKkcX8AALh1+9ZmrMW85iGWgiEa4FBze+v+OZMNcaJJpDcPcwVw5+5r9UMDZl1ygy/J8P5PJycnzBITifUjuT7ykY88KqtPo4qZQBUgzFUG9t2yT0rzwAaKSqFWE8CsO1sTSnCoI2ghvLfc6+51mmotUIUC3dGNn3/rnbPEEeTqjroV1pxpgWNtK9EOO9xOKn9seZIxWDffBagjijlsxkz6GJBF5nhIGfOifGBwaCEADsPcgwgLoXtU9qO3Afsdd8EIxXmSJZvZsq6B1icVtJm5KSTKADCiSpkG6pQTSU5mQq6jNM/XvpZSTk8JWAqKQH/YveS80M121jO1bhYt36J2N60gz6xya30j+xeJmiVyVvduu50U8bGxkPkRf3hEuQzMHoer3GlN5LkokPOLi4vW5/lQ6zQlp5e7T7Xcufsakmj/w74e1YePCs/JyYm7f+pTn3rsX5/y4gefffbZJ8vqY4WZr7x65w65kGuttVbVku38AmfOKhqVkh6MMHbaLWzRfYzXzI+tGVyLirtk2gjd4QYhT5O0bpUZRFHfwaOwwwkDAMTMW2MPWuux73sUro1sSFTZRaKSEliK1lqiLHELaMN94K50YC/oHI8uhp7Bd4SYBZIjtq67QGhaiso8zyyeZ/8iQYzYdiGnNL8VdOBbZ26mmcFRBVLyADO4ta5k+vVoQOqutYxMnQvgZiLS1y4ioTpkdw45rVlHwDDCssIohnbUbAzukQCLj7bW+ih7EDiwtRqXLJnKMIAHBxY8d0Ne6QtYNAqWZLGmSqU+4OV0n0hKZn5xXEst1puqiujgmz5M9dGd/SO8HguH9oftZzycKDo9PX3w4MG7vf/pr09+8pP7O1y64aOPdOkVlunwZ1Vx8kINOB0QfKG7DzK/MpRMa91zjci
JxX11ODmgFIjCDWZwE9G3H2Sj83FGI7aRiAgCugmgltKzkM4dKswsFouCv2GLBQG9iPhANYZbjMI9kFnJsYsAjHQx0hwfxmSPaHmEhERKqUVU6APSOqaITXXqra/renJyYm6aluY2IgFHJMBUa0/mZs/axurkUDDrrdM6H8mVUbFBcupOIgxzB1pbW2vTNNGqbC2LswS1jAZ/3novRVmiGBHw3mVnqHAdG7GpzA9lnsc9mlNYtlPg/zNIsLdMOGBP1ljAuwPZTaKk2zxcr7CNt6Z4Hm+3fnaxqKi7BR5bRERO58N//d/83wCcnp4OF/RfznVJJi/9+rGPfezBgweP7a705HuOnz/5yU8ygvWesvrYwyus1uBlStNToFogYU875RhCtTKkjsx0ZsYCdxVZW+vduLrzoZapUs7QDd7dXEq9/+Cc7DG11mYdKUVb2sZ9XVupxZJ5wlVjpdUqSywAJABj+OQytqM7Smr1sVOLbOhDjzb0SKUNQDiisO56mWoRHah+OFQUQScRQRmqFsDZ/2SeZ7Y9ADBwCvyaFs0AHUzuqAbSWqUuaxOg1MImwHQUlERbW/wopJd9zVWkr00EpZTWuyQXnJuVUpOnU0Qw1QpA68Zs0rIHSxq2AFCn6uZksaq1jnl0t94biWmnyjtnYDO7M7g7s1Dmfnpy2lujLUR2+KlWHyTAg0da09RHeCoqsnYTyMWyXhyXqAXPJYRgIOlqre1DoOkYGpju6BOEdi9IInLjxo11Xc/Ozj7Al167do3q9wMo3jChX70jIm1ltVifaq1TZUB3u4RJlN6tX1wc53mmkmCGotYyGyL5yaBRMAET3wx4J0oCqqt1F3VSF1PwcuONAIiI0NPRstlrKAG9pikarnIt6hobz6OFQtoF3czHxtOE929hFgFi4zVzPz05GRuPFUUjO81nwsMbj6kQvl7rdDwej8cjkZ6lFMo2NDxBkci5AmD1IBg8EwFEa63zNFWCS2jiMm3dE+EFZM9SaFERtN6XdVnXRqtd8jzgESa7BX50Z1SNYJ2MiKQEPAbJ1rUZMhCM0uqMIsvGVgmzYLGd6jRPM9vAFVWWLu7hmch8VZyz7iwEY0zOAbahcfezi2OmqcLAhvvvfeUV3urDi2NdmqhLv76rAnR/8cUXP/GJT7zfr3j++edffvnlR+/8NF+9f0U1/KtaqwMRo4i1gjnc0Lq11uFycjgRUb6nSJnqVKUMS0pLmWvpvWvW68Ld1w6H1CIq68Kzoh+PR3pQW/gzVf14ttD7SbVTUoU9tPFUSTrn6Z1K4saoljR5XvNTGMYybdmp1nmaemtUA3WaRlPVNF2FfXNdosCGKGSEGeh1quZ2cXFBQnUPak9YdmC4NO3jNOeDVBEfPoyZmXWQvYKNZyBM2DKQzcDv8Xjh5icnJ1oKiGVrjdgR772M1Gvekw8dUHKPVzwbo40g06CSHR+MAYgM/j4OQ8adJWhCLHx/k7B0ICIkVQpdu+Mx8Jxc3t+6QdDWVSC11vO1PceSw97pq/zel195nAj8iK9LqeBLq4UnurjPP//8tWvX7mVfove8PvGJTzz//PMfWPHur0vLNJQGAHeowEgPmsUwQzdIKd27aCUhlsOHPew8KN2hItOEINPxs/MLQXaTz6ng3ltb07hkWHas6QmEh0g3Ew84Ruw9uAMmIJJ/P6j4KbMeFEXPbLBnPllGwDk7BDl8lNMEfHun00Sk1jpCUxx30eLma2t1mjhSKn9vTUQKSSPzHpztIVBVpQxtSWLKPD9GZimrHXo3uJm1tYnI4XBQEdYZIFWcCOs71N1VpbVOa1aLdutmPkWrJOWjcPkZBlTV1oMEbzwrny06NuTuYbCxBO88cuJgG8v+lscalCUYRcIABFsRSVQgOYBa68Vx6dvhF9YUUn4+bDf4UaF61H5+rBhfvXr1C1/4wtnZ2fe+971LFvWYuitXrpyenn7sYx978pc+9pV3e9srr7764q1bVHiaFA7bSpmbeakbotjdtZSMx2r4Q6xXC1pvTEXLVCI2VARFYY7e3z67GLBcAYTA4zRQo4ksmA3xorK2VSCRrekdhDcnInCIEG2xiNGqSOgzH4PK88jHEMys1JqgK2EdfNbPwFLHjDSsqLiZ1jIEin4EIGY2TVPvfV2Wq1eu8LwYApUzpg8NSpTNg6xbDVPEnbvc41TYAYkBer+8qCXmeS61Mra4Fy0BQ4ZNisbXZLhMi6JsgMqYdO8DTxdnuUFksArDR2sFWlnsjNq7jGMpA8WcWkRR5GMG5e572j/PDJcnZwLB60trZxfLlcPsaRt8+ZWv7nfth+cGL8syUrKy2/F4RBU/9g48X65fvw7g7OxswLb4kUfl9t3u9r5eIb9kxIoz1RmrxthhGfWwAFgQ5+hdRPq2a2KhQ6tMo1US5cjgtq5tWRuSI62bkeBppHQBmLkqrDeIQMlfw5oZmaeJvO2XNG33LoO/BkCz3fZjyiUKBj3hgMzlZqSY6STxCM+EzI7g18i2mBl6L9nceJc2dzJAAljWdZqqqLDBRViIZjIIq6VM0zQqlqZpqlR9mejdYIndevZ5E5YNqopIWUVKKYfDoahGG4idqoRqITElSLAeYhMSmPmqh/aBo3dj1SWSCYWrWoqiKBXysq4iMtUaJr2HpT7kl2qcqnVt69e//vWh4XPNfIs6qlhUKYvWwkViQ0QzO79YrswT7Ss3/72vfHU8LU2gD0OAL12P6t53s5/37xn/np6eXoKOPXqHDya6l1589c6rL750W0XLXJEwoPOLi8M0UfFaaiTiFOlG9W7We6nFBEkNbYd56r3XUg7DbKYUicLsnfOLWrQU7YcDuw2XMrHkHjvbsrVGPFOggM3MvJZCQJFngT4vPhBhFYnVzZ1FI7kGjhrAmjtQBOrSzaGuY1AZUefGbd1Y9LuPvDAwzhQp2TwAsLAOJyfLsrR1PTk5mFGxOTbZco1GnzwsvJspVNQrokaZwf5h36Mkn6O5KxSKkfJWVSp9931jXoBkfzERnQZ5qZXHj8HVxeDDmuJ3snXyycmBBL7hfghnP5Z75Lc2MsFMBI8t1QO2EsA6S04TinjAP9SZAB5qjbex6Exd3N3cz47Lx/QZuLv1kWv8sC8Goi+xQz6l/fyU17up7nf769O9Aib8A6cIqMhhns3M15YOi1iQKACAJx7azLVoKWVpC3ddZAfNvZuoQAtICqDy4PzIPRAFMK1j3uZE8tmSM7hSwRyXpZZd1yCPAM14RaSYdbZxmOd5OHd81EiPoQ+fa21t28CIFkUxF7T+DK6QVG/uPiJr7u7oGszVrlBjmhlO+8t6b2tjnnWodH5wpTSVAnhvvZvVUmSadHwHs6Q9yi93UflsHuGOdV2dPowFpAs7UkhzZ91w7/3ieBSVIBnLik2Hj4Sbu7NLlYieHA6M3XnWVVMzx+JopCXGtGqWMnOKu/VlXR48uO9hwsUu8aALjG7x8tDuT9S7g34IAQR8zvPjsmbNigNf+8bv77fshw2KvmShXJKZR1/5kXzj03zLY19JMwegPKgGtDKcSIYbotCN/mFvfV
mOy7rSeeJ6pcJQskcEFM/YZEib+XFdzRzJecwN7Y/jMKAl282OyxIlxEHGwqKcAfIDnDAIn6epTpNIEFbbRh7uOQqFgGgTjjYJnmLgnInjcWnBnZ5yHSeYiGDAMDhjvIOZEd81TVM3uzgekeN6xGIKpNnaGqfLeq+WRZUiwgcutQZCVRKhzjihO8HPzLlHrGkXzt06HbofDnMmEtiLjaT7ohJTYO7iApWa3kLPeJiIqMilhA19FQFqUn94kmP21tz82tVrtI0D+JNPTk3OtQmICBxBp0kPK8gu3Z0WhIq8/eD8x597xt1f+epd7JThh2dF78FY72k/P1mG35eW/iEN6Tt37vBIHq0MkCBJFt+WotQNhMATXcNMKTH77LcxasJPD5NqMWvphQrcH9w/L2kfcb+1hOKLbtdeIYvK4XBAIAIBMyYg2QOb+zCYLES9QEervTGBjxuXq4Z1HewfOS4zM9YFhOcJ33Ya0Ve1eO9W99ENx3Cwp2lalmU5Hqfs24pdULCMcZWHxlW79VqqFnWDis7TREvD+lYtzet4PBLtldRykTULq9xj7E6SUQ3G6RDIYIUMak+BFk29bdZ7l93s09xPQ8UpqMbdkPUS7iCQGgSFl5JuPmez997Hm1V0quLuxgpEbNLoAE19JAMBv/N8Wemz8fQ5PT2VZKhy938JbvAPYz+/p4qWXdzxyR9877cJRFBr8Khg57YBrjpQhjxwyeeoEBWHuYlHvrcWcfdSdDrMYHtac4ihG4AHZxc+quJSvbMDMO+uIlJ04JZjHwJE87kzABPojmFF041E7gRrzUe+N6IwZRS9cVAC2MCZUIWnTi7ZjpPlSsSe0YlTBNIh2OHh1gPOpVkqzLoa635clnme49ApRR8eV1Qvq1DsKkfgPYZh7uxpwHFYcCabqK59hWOapwGWGnUSQl759NdJiEEftYzGFjkxArH0ScMmALDbr1PE+ocpEcGqkQfOgw0ZZzLJUmwA5nbn1TuqOpyPsPQ8qpeBKP6g3FoWedVSJMPmx6WRYuGVO3ffXRB+xNc8z4+W5v5o3eD9fZ78ylO+7dVX72zLG2lhPqOEwWgu2wdDimlkurkrW2P7XCoo291QCjyRf0DrdrEsrZskHVSpxdbVAR3pWRGFMETJrYjxojs8lM1wo8a/xNLy2UAKOTBuSgM/Mo88kaiQPU58eOJ8FOhRhGhxK2BYoEWUqcjQPO6dyM59obs7gGmaez/vrenJySg9EHraIbyBmFISQhuqu8OyVBdw99Y6DzYzi7JEVeu9N4PIycnJ4M2CR3qNebxxRCWgguLqdDLjgbLMIDaCuBSpCKKQkoodcCB1tLtHNabDA1jjQSsb2PeQ6oxJExwmAEGUPEqsO9ylAMyDZ69gCnCdplKrRBMmNbOz8+PpyfyVr7469u4oqd/X5f7Irw9gP/+QIv1DynOtdXP4AqnoQSdM5EPEnlkT9PCSuQCYg5DdFVKEPUpdRM27m7Vmy9rcnc0Hab42RFBHRbp3Lpm7kx1TEO5VoYRw73nkZobbiF0WgzVGoiJQPj93sIdNrcja9fAZPU1PbtKoJR7OXZBdlVLIsBvuIcCvXo+rVBHCSyRs7HmajhcXDMprrUgps6HHHMhAHa3auizLQ9UFIgzfxYEaJYR2dvbAep/nmdap5LHBQzaEH7Buoiha9mAsCjazfIIN7uYRIMjQtyqDTozFk2J3ZzxwH9N8YjezIPhCAMQ7D87erbWVU6yjyXj+65Z9yc1EpKVjD/e2rnweE3P3N966/3x55pVX7w7x2Mvwh+oGX9K076l439Nm3l+Xbvs0t3qyhPfWyOJJKRrYLBvnuENkQxNM04Skd3PvDGLbKu6+ruV4oclAFs7Xm+dnHnGprqLsEMs+nW1decBxyfIcj4Gyo8dW0MYnB8xstNfZYsTbtmRHTqhqz+nSoG2NijrPfCQQAZcBFBm7sZsIYNFtabMCODAKtrGEy7ynu8r9f35+TnEdguZ0AMcQKGhS6vF4fGhds+yf2bAwSs3Oz885pAf3H0gwj/C+GUTbNgiYtilBBWYDozPelc8UAWwaJKyjTyQGEPYY/VyLKqW088cDjyFlUMBfe+01RNZx91CMTnsuYjrkZv14ccEVWtclIwVw9+V4/sxhtF/dbkUZ/jAEeJ9DevR7H1XFH+x6grR/MFX827/zOy/evpUrGb6KAL13DY7RkC0AIsKcjSNZsITM/65Frs3l/JxIWw/7Ffqt776xts7IBVet93Y8HuV4RHjE26pxe+hY7ix0dbOi6oMWI9sdIB5qz9kYqpQivh9vdFFzRBZnG1kYyEe2a0qV41s81ZDNkeMLgSGie1nj0JbjMRzjR2RtX0ENQS21wj0DZ/F2OgtjtVgqMU3TycmJjHqoXbhiD5YCTwh3iTo9qaMVQ14q0s0SNhsDFpJ3FTLpOIASZV1w34qBPcAeAoxGsdvGMrOpToCbBQZL8kiS7UL6I95WJ5HJ4XC4PDTgy69edoA9AZUfEhjryUI7ZvhHIsn764OJLq9SdJrqnlQ8o1aj1xRNyM4+0GMNxltZM3CYyzxXyRa4UnjCQrQcDnV3XMOsLssqhCLVMgw6PsBAZez3ZHcXpBkrMtKQ8QCBo4wI8/ggKaDNnTXnxQsECLbN7WiiXzCGtmkKxmitD7MzNl9R8cdsSABuNanmZJ7nR8VtfAt/rlevXMG4AXsdZOPDoQYJ3rh69erhcIjpGGjhCPOK0IcRIRO/u5F2gDjqMhzgtORpcoy2kQCwa/JSI5e9Z59jyjcoc7kMeeqg7wjyr1y5wsDYVo2Ug6YaH7vH3c/Pzx2Y5vnq1atjxke08+LB/fHO/adE5JlnnnnmmWcuK/nH/fx+f32s0D76HrzX9ZRC/sOILl88HE4OJ1d8V0OOrfYLANhqFGNc23HuZs4G0e5+Mk/XTmdJq5s2+Rtv379y5co+Gjxu3ns/HA6jG+t+4SRrv8euI6bdrLtve/LScBJRW3avRTt4c5+i5BiX9uRwGTZrGxBB7xa5luT08egotombPNwKnA9cp+ns7KzWOliTKE75RFSxTIOhslML/4BMnEqa05Q0EdFS6KF5thdSEa3KKnoRhTv1tgIoyorF/LrwEGiX8CApIytrcc65+WrmZrVWOifwyH0XRrnDHrGgfXZnYxs+fvceKfpMx2va/6mKL+/oOPo0oOE8JtnHmIWTf+//8f98dNde2sGXBPvRn5/mVzwsb/6ItfzoK+95PY2QP81H3lPCv/rVr7740kvW+4Z5SsxBHpexcOxiJL2bsEQe7t6tF2E7B2VEA6IR9hK5f37EqELLGVDVw+FwfnFOsn7fL9xmGNM65cx60SLq6mVgJPoQCY16VRGwkxbNPuaLzbyta8omaUZCkeTTUjeSPRlqZhKt/1joBkDSAdzHnEOfZZFTCp2cnJys69rNlswnmVnEusrWcYIfr+M8CFro3dLwa5Z1ATBNlQi43rq70/Nua1cVsBIakgZ/LC9XrtbCE2vYCh3wASv3hOkIhIVREqSkZj6CWN03dlJx4UmpIgL1bN0gyaugIbtJa
LJ1ghUnze0YnZArkwT/iXNx10GI+fCufRpZfez7H/srnii0j33D2JePft0PeX0w0R3PU1U6sg0Voh2Ou3ce7gItCpMBvxVVUgK33ueqbMs5zzNUkZXAcGm9t24kwzA3lSx14g2jLaY7nNSWEDF3X1ZRQSlFSsAznGApAEzJmKqI1iiK4t4AInglsc2ozIoK2MrI3T1d5SyShXA0SvPQ3S1SG0KpK4GAiK1ZJNlwAWTJB/arT6TQPPlxWdYlo2KMErJIGC7CroXiqBGAkyjM6+4FW6pmbev5+YUITk9Pqft1qoD03pa2THXiicUJigbIuaiqGgFAFSaW+Ni29ZKBs9YsyROqFmtk+PFlXVmRG9o4aZNGBMvcxXrvBnHleT6MLOabsGFrsjwcw2J3eFsbXSmNTpOiWgTSrffWaq1f//rX35e0vC/F+wHeMF587Ld/MMF+vwbzY7+41KJwa7SufG2t93EydgdKUXdDktGYmQhaI9S8AF6nUjWA6ZQKwN9+sOXqyOY6NLw72tpMhPQ6tRagAOitH1ub5jl3NECcfO5MTViRiKiL73amuy/LOs+TCPmrhM3xRMVdLRt0NVYEiGQtVOBDkFWEPZofdEj6DdSNBhOjNFOnxZ8FhHiO0RXVs9Zab4f5UGutpaIiRrccp3kO8xiIJlDjoCqq3b27ra2trV2cX8B9nuZpmhoZBx0CFC2np6cDYukBPXbPCXIzyXlH+iGcJi1SK/1V31s7RUuRkkjJAd4Kfn1N+Bfdp7auJDGBSDSACoCY3rp1i9/rIw+eo+MDm1mz3lpflxXuRTVrrD0npZycnHzzm9+8tGvf78+P/fU933BJOh79yLtd/nTXk+/89C+O17/yla8ULVXrNFWGJmp0cheBmJumyUfft7d1XY5mXXTIkk5aEIUBgm6sj784LoidA1Yatli7NqSFz9BbdwcpGa5cuUJ8UR+VQOlD0XXsW6/ch87HWmqpfAM7LbmZCaRoqaUS/iki7FPvgGRDbHDblyIi3aytq1kXApJFIiynhTFli9tGm78YnY7RsWIndj5jpb2TxlNKrYwIWO+N1iI9wPFVcC/JesNOp6XoPE+EaMOsNU4VZ0PdjX3KRaXUUmuFiLNbUtFsihYoj7DaNbhXQGslg1F8TyLIQ2LNHSyNKsWJOHUTkTrVUqtA6mgeJ6JaKAE3bt0Cj4DI9qUNExMdHSEGm7ZlMRORuu677jJPdz1Zht+X0L4vEfoA16OS/AG+9/LrW6YofEW6w9268pewngzwWqd5PrA+lc0iS9G5VoiwqBhFUbQR+TzkABCgZPNKcw+B6S1rD/q6ri14WoqKmPW1rWtbiwg7a4sIdsDeAPyn4+hALVk8xwIei66Lw3Djvp+myspToahp4accXlWneVYtsdkyv0Nxjdw4/e696JHwXaSqShGD12mCYF3XeJ7e19aCu4cYabPWWk0NiDgsValJSym9dQFUyzQfgk8gqZIjZF3C+k0jJCqHESG6aJjmBKaA/hAcXlRbOp91x2biSLyrA3AtWjVakPfkuwpfxmVUkDOwR2IgLcovffHFF1+7e3dsr3hCJ0jbixapsi6LZNS9ls2Sp8Xz9W98Y2zW9+X9PvZP/v6ta7xX8PlH5Qy/m4g+5Zu//OWvsL51Cws5VIop5uRdaL0DwgY6XD51g6PE7vU6TwDDn9w72nprLYjZiHkK5xRUD6W3tixL73ZSCtxpZw69yvbYA/cOJ/QQYBkjq1tpssLNe9XK9xRVgZh1QEupQ8cMv5K6Vwo5oEQSiVSidj+s1AjDZbYXEVV1eHR12BZPICJMTUFQUHTSooVcdAn59lF+B0BUqyqAOvz2ZV3hPk0TI1EiaOuK7ODI2iDGAiS1JjJyZYGXSi6ysq10jG1w30SjQ9NSaqnktSzZwMIGxTYbtLGQBchjIOP3zhB1snk5Bsw1q4zg8Ju3bt65c1fCgMayrOxYRw0vsuNzyhZYASoRQORrr7/+7nt+283vJs+PSuB7yjDeRWKfcEy85xM+4ePvdof3Jc+IgQS+lxTqTBrRAOJ3VymsVOXyxYNp+MMn81yKMsaCCKrieDwGVCBxusdlOWEi012BOtVlWZZlOWmNaiAVqSPrUraGQu6hAHbp31gC96oVGekUEahbJ7BvAD+ELrARzGEGVlFJbLwy6MF87M9Nse1cuYiiyrbZ4rl77wQ78k+l6GGej8fj8eKiqEbZxo7ajT9UJqY8w+LuvrbVzFR065xSBr2jsikR8jFHBBiRh4oHBdFnvSOTTO6+tkZOLHe2Wkdr7fzi4uTkMHwJFyLgeq1VAOI2rPc15RwIGIcmk6ZEy1ndliUfN/Qw48+ph1tvHCB9oeAriwNeVcYAL+3Rp1K8eJ9C+/SK9wPr26e3vT+A6PKH49I8wrms3+xmVqVG1NDRtxVkmtc1bWN3J2/+iF3RAX7n/nnKE7hFWT0rIlGXLjvaBiR2kPEXHa0FUmWlS8RJ3G/RkEGA7mvQthHqKGLuFxcXbIUhsUHikKpVPLGYD23RHGD4hu6DCUiGC5OPIdQ4QVkRVNVtbe5ea724uFiWZdC+M8Ob28YBaNYDeanF3bPsVs7PzwlbneZo3BqJHFVKFdu06IhDAEhvgg8X02re1pU/B020ai1FNGySk8OBNjmTSUWEjTO0iHUTBONcVXUzd1vXtVugoOLMk61RN4hid3Idwd1u3LzJVAONCLrQIno8LkwRM1IfhjjCvDHzr33ta0+z0R/rN76vXx97h6d88Ud1vd9vvPSn3/vy78Ed4qIoKvM81Vq0wKwhsJU2FXXr7r311vravY9KgLlOkUCiDLu31i7WBQG4jEwhQdTmpkVFhbYads1xZLeC1C78Uxm8XBjhVvek+3fyj7cmwMRyWoCBFXMTwenJCTIgqipF9TDPU62iQlIKDKSHu7uxmzybj3GiAm+cMu0AQ8XkA3rsALVE61Z3X5ZFhvodPFsOM6/MaDEKJUpiQFPV3puozHNFNoDFdnjsFYK4+8XFRWutlqKFwqcGLMuCHX1HnWq0OaVxMUJbHv2KWm9O7hszAby7Klpva+81DrZwv707yeLHEUejDUC35oZIAHjg3pi58whlK9wYXFMVHltCRMgOoU3p/f9BxftkGf4AWvo9D6Cn+WtYjpmTZwVfb64Cg/VuvVstwRcpzk7dXqfi7vNUU+t6ZEfc3nrnLPJADkkRjfx/Ie4HOuk8T+u6tra6zyB93mbAbrNHkV2W43JclH2V6CG7r+tqvddp6r1L71pLcKQ/bpciamYzyBNtg0BoE1nj4KD2783qnMYp6W89yOscrAqMOkHZ5NySrE/RrdZCvMqyLq0dSp2QzF6SnkKl+g1b1MHIUFsXck/XaR5mwO4LArmB3O+cDhoYuyYGhUlbzuqw/QNc7o48OIIxI1hfhYk586jQqqlaFUysF8l8QCMxSDRP6WQAVhFA3Jxp4N7t+o3rr732mgRRuxNkY2yAmLaVJJzdtlrHx+zdJ8jz+xLa9yuxTymZP7yKfsIdnqCNv/LlL7/44i1Es0FREcte0RYEDwJ4pPRLKdne
3d0rEya9Qya4oXeIHJc1JAdMl1rEJpyNlyI6VbQstizLeuVKZmXGRmX54YhgAaVOh2zeK1lLONXaC+3zaM7kUaa7wb/cnZa2uaeWEy9qDnHvZiB5rWd4aONmDrXfeiNplADdu1twR5OLjkLNkyHISdwoXod5Pl5c9NaOx+VKqWMXbdgsAHB29LTj8cjo+XFdHZgP8yghsIRYcZzLssY09U6UpmbBERfNeseG9wSA1ns3QyaL+KIWNfMiOjpNAEH+2lvb8B6hXbsDokoYigNE5Dh8Xda2NkbG3aMVFTKyL5CbN26a2bq2ZVnc0dmdECDtoO+5uCAi8g//4T/8ABv60itP/nXstg/84o/2erev8Lye8BEGZjXSfpKbEmbOxBzScPPk4iZbuJSETzrQ1nY8mnvrdraszhobjTyw5zey2s66ubmUcLl7hJayfi033rqujHT0RPWFH4s4XYz0jtgO8TXaY6Z97s4UqbF5N61eAYgb2sGh47hJeaGtW2qRUhhtpby0tdEaHoEuQCKxkwq/tXZxceRI6jxDZFmO+zE6ggOo0mkm7IGuQu/t4vzCgWvTzORoHifwKFRw2WisaUUUFXGzhS0aZOCnEB80iEDJUOsY84uRB1dtrdVamDAwlge7JSISyLZpQm9HpUqJHcMYgChEyIoSAeX0yOn33rhx4+7du1TQzBkCUNG+q+0UQbR2fBxbGp5CkT75DT+k4n1Uij6Azfzo9QTV+vQfcc90xnGZpkoDDZ6Bz8i1IKTHqRRRSzm9eqUIPCETAjm21Rlf3Jj6gzxRRGCRCwRQS2Wqqbeeshl7Fe7sK2BmSOIOIGDJbrbu92rSeoTtltIYe1VVIEWU5ToOrGvsVWBQoNJwhMMzoBqnVi1adLKsvwt4YjwZtFaqolEmMSTC3IqUeZ7OHjxorZ+sq2ed4xhjHe8O0g7BxcWFA9M0BfwNAV4xGs/AQ/co6g4V6WQJBNxM2ExUsrIYIoKk58I46ig2mm4Kl5/rRRhqMPi6IziAIBBVHhfk0BLSmZZalFskgBu0dmR/WKjqiy/efu21exBwaZWxND6qKtwXJiSAPYjyycbw07zh0d3/6Bue/sVLb3jCX8f1ZO/9fd32sX9SEVbbVhGXqCUCkQIIcl73BE+pCGSqRUROD1OZJ5gLGtylFoi8+YN3BtrRI3YjA0qx7dWENLTW1nXVovAAeByPy6g8y2cRKeSjCjUuIs3MRcqmCV0gDwFygXSqocr+olDAE2sJwgBVrXcPvhiCvWUUsSoTy8T9mklmj0lNEk6lkzMgEumlFA1PHqzkXdb14vzi6rVCPHZRXdfmCeeKVQZByMdFgh0iYlcS4NXwhGsppI8Siae07NSI3HCewUNSIpUs0RKgtUZOUGynV5QrDeOHI+R/pFHhZOp2xW6iRR7WkWY7KXeq8S3Tm0H0mzdvMqshJMHiWSSxwzjG1x+JPz+NMfzkX9/zlff14vu9/F2ud3vnE27y2D99+Stf4SYh+VPLogVl+HNUBRHkZy4aRSylVAgg2UnDHO5s/JOYIqe23q2j5Jkf+1tEmnX+iWahss6BARqe6aoqrMPFsKVLMPvAs+bPs9YivgtovTNrBeqVNI+DUzEeCEroaAxx6wpCCeEWC4e0B9dXpE4QLv6Y2d0Uh9kyHw4CrOtqiVA2d8pOgJnz/TheXJAj8nA4DEfc3JkoX5bFx3HuHlZAFt9uNn3+KdM8Ienhf7ttrrWZm7WhvXcPY4k+xxaGoNJGJLHIe0Y7LVV3RPB77623tcUq2jZKEdy4eTO6QJUSoX3E8UDq8OgT9/5l8gm/vtsr7+vFH16Sn3B9MNEdf2KnAcuK3HEW79wT7nTiDvKwBsAEggPmMDs/u1hbd6CbdQZOerekab60lJ4MVb31sZTmrjXqZI9JbZGPC+yRC1lXgF0cJIcQD8xSdieekV4x4z7Dr2YxINJli1GRXDoySb3HHyVdXw8LPUoj3Ky3blmG+dAzw+dpEpHe+7KsyOQ5VKd5rjIEUsA4FoCTw6EkOwF4YmU0nF0kMbx2lk+F+EX8OHgmEUukGj2sOGWH+bB5z4heqTr4a3OeJX9IBkuYuQc5Wc9Xgo8zA9HkClKKNg89fpHsjMjW2ic/+cnXX3+9KDTAJgAAIutJREFU0iPYkg1h1b/++uujP9j7dTKf5v1Pf88fiYv7w1xPeWT85m/+E3cptbDxXcYjtymVwMCGjeZAa71qKUXR+mit5MCx7fr1AMi9JjugjkbIF26Mlfjw4MZSunuppVq11OL7sQy5BUKn8teODldAUEM9qup8mDOZZBCUoiuHyZpn3TKaGIEhjydfe2dXcbMu6brvphe7x3DfFRX6AJMJOnyaJlLtXDk9dXIGuTtQWYjj7tZtXZZ1XWutJycnSPPdAetdVaZS52lyqvveh/mOXfaF4R9PxLYDKCKD9lY2v0IGVFpVCHNnSTRDlMRXAI29S/P85kF4XI60CJjKLwHzxDhxBJB01znOsmu2wsW+cePGW2+9NZYTyUE/TfX2rVt/99d//Wk27h9cvP7QF7/I1Sy19s4yhMhYcjU5w7vVJP5V1X1dllqrFHKv+tvnF9iZcghiLH3sano0A6H5bcjsFNjlBzg5OTB+ZkgmOg1UvAfC5NFN29cGRynqkqldXpKed50qg9vMgL77MMXNTVzcl2UtpahK996762GOgIBqAIcleQAAzsAYJoB5ni4uLgg4oXHBoFKQthHmuSxHifh+OofUusm0MG6nCRkTOCE13SL9QxeCx90+sSRwgUSJc0LqBCiiWnWU04fxkHQHbp1EYfM8a/YB7darTJy7oiH1dMJZMAgmkhkIJbXCOJhBXpVMEu4cqrEALEj8g+vpr4D1wgUopYp4t27Jz2wiPHAVflyX6MwAUYQPZG5qChHrRv+ZGRRGoS2DGvAtyDJUKEvRzGxZFvLv0xt/iB5kJJDCcqYrG0lNyuTwLUOVmnV3zbBICIzEplVRreoEDiHjPWajMK6ZreuqpczTFGw4gtZb1Zm6jP3UMvtTOExGjjDslOCW6BCptU7T1Fo7Pz+/9swzJSPnysgYe5HR95vnGbuAAUNGHvmxttVYcpwJ12acqUQRSCm1TLXWUZ/Baic6MulXmzuPUfqzKgSQKWtNIISP4+RwODk5KSUI2WutJ/MJ2CSHw+gPgUkyeT8AtwqAPWA50qg1L+XmzZsYYbMMX3ez1vuf+lN/6l/e9v///+s//NVfbTYq3r11c4/SPwIE6Ph18zrNh8NJKVWLTlOVovPhUMokInWaLpamLnTByii7ZyyIC8oUQ16U4VoL4NsSRy4i9SGxCunBhhTn4a2qDhv7dqosS4t9Owz5lSgGOrSp/Hs29S6lcOsiXVwtZT4c5nkexTjzNE8Tm05MkZEx9yQIC8UpOqRX9kSZ7u6Y5wr48XjsjbEBA1AtivIGsr+enJyM/kO8mIAWRP++dV1tx01DU0dFIBFqZg54nIVEUPCeTNL03tcEUTFCyM9GGD3DV8QDZEx6CxIM+LVLJRirPkqblGFsd2OIOw+daAdFPN1LL7302mu
v0WSL5TJT1V/8xV/8e3/v731I2/1fwytYtZPvIkq7iKn03YIi7UQx93fOl/sX6xbgEhxXktSMpOPGxLRf0AAj8CWpocGyunsYVQjsXWwhM7s4XtRaByiQ/Q1Ety5cQ71LBNW7OWoprHDgzuHWLVH0XLZgjbtnQWXs20wUUQbH0SG1uHnrPc+mbaQ6Iggi/H5EkY0eDqfH47F3X5bl6tWrFIIqQs4EJbnMyclhsGTw7Am0SiRmGRsYfUoR+nUXOVBV7Phed1LKikDKUmh1zepqJIBCizAthkzklszCs26xJAsf2bS1qCRxwcDHZQqaMbIgRnEPm6m1LiKlFhc4/ObNm3fv3gHCHNCkCLx169bdUVH8B9cTry996Ysi0aCs1JpZPlhXd5OiMAyzDtkxzBOVpdFWVmrR00OlQdotA8tURxl2FgirviHRcbfWCTinmg1ezEj3Rr6Xfq+I1DqNqK0DEB8bZUsJRRQl/EQ2CAbASknVMtqRlV3MjDdUFTgGBkFkt/eS4o/Qo27k1kGPCqRHR5qi4ZGUFZV5PpyfX6zrYn5F3BRRBqnHZSGRxzQf6NYrEEUUqeWNqXhVZIrZ+YPDzGjAsGoxTh2LeDQlys0zhJUPupnibDAjKMq0Ye8dcThFgHqsfeNEAIgWR8KxjXQTvQj6vXxmZPhesi2liJZS6Qap4MaNG3fuviZbqMzd/fbt238gwE9z/Rt/+Kef/7FnQXiFO1RBdzHUb4cKoNGzm4V+WuKvAHrzkfh3P784nl2sHvXrQzFY6AIRg7MnKDWTI9KB1jt1GrfFKAESiXCrI3BUnh7mpd3LDqOsiHSH9T6yIQD4jb5jQbfkrGPQNNod6SjO1VFlAEA0eqZSuQXRMqEWG3vXpZEyOO9xW/dpPpxfHNe1LcfjfDi4QjmHrBya5rmyeKj31rs5yq6jgkJo62uyZEQYz7ejboxt5MXBx4n/SKIenNq6ez/fFhxiQZwvmsAMyWgTTY6RmScGg26WR/WijcbKLGzCLnEt5Ad240PQNWitmfmLt28PW4DJg1/8xV/8l7D7/3W4BIkmBoA43924+VBKIFo34zCLZvlmTR5yBVTeun+eVhur81zYtzkjRfyOTVESt1SKmVtr7t5aa611VshsoB/J4EzR6BiaLq5ARFp6ldy9oZ08UlOx4zLsohkB3eZApJbANZbYmaH5wgLN10MWknAqtr0ow+CBrDYbmI39SBnKcvfjssC9tVYZIl6WI4DDPEd8PcRpqDpExggAoKSGHVlrkVLKfDjQmu9DZvIOe3t6EHkB0IBzxWJK4r0kmXcQoepcaTLaaZg5CV6JVHHgyBOaM7ICSHQ7YXS9dUTbSNYbA0H9Ibdv375z546PwimAr7y/3fzfv+v2iy/f/cb3kAGeWksCdTCWFRk3dniIQ/amBL3ZoPHe+q3QlHQzR9aBMhWkkvojzEARqbW0FkClrSRl2HcAdsuqgu4irL5zU9Uyl0xGhlbS5FQU1YccwpQlas+wG3IDA+H6uUh4bJkbEoSIDlvZ+KQCAArp4cYHSMN7j7L9LbfsAA7zvC7LcjzalSsQqRCs66qipZb5MKsoCXGgAjDWrZKxNYyIGACPTsnIHIy59UztqGpJHg/bwtQDmeDTVOnne9AVbCGoEftzd60gYbYAa2+1VoTh42vrHnQccb6RpJbhbkQMTAm6Kokqc3eVICipo3ezw7xzLtO3B9xv3b71BwL8ntcXfuqnhpqYtGaEKDf7CJEgTOxgRynKdqCtNUVRNarViK8KkvIN7q6lqKjDnYsOIWCO6whgqtMiS7cO2ZY1TMQsU90AEyyWEpFkxgOTru7u1s11KNJtD0NjNOGODdtwryGxE2M4DC69Sy3MhXSX3prU6tmnmjkzLSpgwZPBRUsVIJJMAlFdliXVFuZ5Zh54WZaT0xMtqq2tSuwkD7mI36G1fnFcQFBbJzyRjOpx8lCS+VeWSs61akLMw/cAFLvpA2haJO29Lcsyov/mtvbW3btniyoAmcOj9NKSujgurTX2Q2NermqttUpAvnaLBUy8agFIkyIkPd12FdBaX5bl1q1bBO1Z7+b2p//0n/5R7vR/na8tG1eSW/u4LBkHcSbnzL3n6U+QvKrWqUpGTFq3ZVnMg+1MAHJE5CoLZYhSVKepTtM8TbUom7mzxnA8EINDy7Iw8MHGmi32qpTg3KDJ7EHlBZlGEC7R/oII7G6xdKaCRUmK3ntf1jUVj3ezZmZsSECbwlwgpWipNZS8Wet2XBYgbFuBVK2lkGdyC6Z6sj5xF5dSTk5OJHrESV3XRobX0duSWluUeFIiPTL3G0G2DUi5Lkvv/TDPI/BYa0U66GE5DMIADJtYODCQ6UZE8iAAAPXRrYEot4xVRndSgRwi1BSPmrvIRYSdeHhGODBPUwQtoVwmni8PDZZCDrj7zZs37t69G+rD/8CKfu/rC1/4gjskG7lyYwiEqFua0L13xq6q5rFsJgjI9H5l9eQky7/TbsovGi14S1FkFgOAiE51Yig7Mia5su5+kg29+EKeAVt5E1PE67Ic5rlkZQutV7M4NRRhIXi65RkzdT6GkCn60mAl69J79DHfD7ZAavb06b4brAhfGYPVHa65FD0cDhcXFySs1AHeKKX0UbvDYos0RCmJAikbv3JA1WopJ4eDEP/E75eITkXUnwVc0Vg1DiSWWScAk1wq3iI+zP4xWJbluCxra5bp8pD/MMuArVw76k66B8FAN1tXMtehlGAtERWzjoTLxLeDMm+jXZuo3Lp5S9Luun379oez7f81ub7whS+McC51UUsmhm4tHK8tFwAAvfeL4wUXKALRCRtAmqDdbF0Wcvdzi/TemcZsra1rox3HZ6D9WWtFrmO3bWVHdGoA8hQbRIRyXkRODgfN/ngYIZgQ1FQ/npVRlLqHBtu5k0WQpC6EAvrF8YK9jjAg2THY6Arq8LauLQcbd8vuLcuyOmw/2GmeDocZwLosuiyLiBwOB4avRIX0riQeoEpUMHqcYQbA3WsZRoSOBIxmOxXKG+fL4FwoQWLl4lkiJ+burRNcQZoGATBVmr0l2DPGmQdn3yemc3XEBESCiwkoWqZpIpyLT8H57N1Yw8FUHhPO5ATHaKgHKUVffPE2jadf+NO/8KFt/n9NLs9FpI2aVfACYu88CtlrqSStK6XQkRORYNIHhqib2bKsbl5KQaJdo4dJtnSPtg8Z4XQg4hpUy5l3TIBj95D/cO72wVnNSLGMgCt9eIkAj+wCVMxfcZcNNFi8W6LvjzsUokWLKLNE8zzXqWqqvRF+A8QdbW3s/gkWEme+BSMdQ4RWVkdxmuY5CgwVwDTVqP4txRLVTX+YGaOhdfO4dE1igThIRLToyA8Nm4cuAX2ASHKZjeIhgM3ggnF3g8ggDu2BCNfR8AqAw8wlC5VVC0OHljVQKjLVWksJIzxPNY9cV/SPpI1gieLUQWIUaS+5desWVcsfQKOfcH3+C5/3LECzTmI3ZbwzO/0hpDEv5mR7VnRi5GsyTLJfC2xpBPDFWmslTp5hqjhBBu/5cJGEYrbP9qTIpz0XRm
nZSbRnuqjskkS7y813HDq0nBEksiqQ7i6qexJIeoKEDBJSQrGIaF90N8rwtsN9qyVUthxIWvWcJp+mqdRKa5dkljIKL5T9uHeZriG9tO+VzW92QpID3kR9N2AMy8HhWvKIG7MIwIwtF3pPciP3YZxssUgPKNh4PJ6lrRMcSq8h00gZNe9ZTdpa670BologkCzYKMkJrqWMTcQZvnHzprvffvHFp9jJ/z29vvCFn0J0zWM3sM0SDv0VIR+hg9MzYsk6WHph3JasLqCOHZaVO3rro1oQcJXdKR99g4wNokWktbX3pmkbMiHieV5jhDfDzY4W1gz3kLaaGCA8bjOHK7nVsQZTs+8288iL9mjUY2M/t96rFg3KRjDeNiK+mhX1fbeZAWC3mc2dwG5nDwaHAqhs3JCGBNJItqDHowY1htrAM2Mb3kYRliPMoWRhPydLVcnzLGkb8CvorhiSkazHGRFY8DAb0pXC4L6L7jLb4RyhRXX2CnYAslUtmy3r2rOgDJFSTkhNvKcD6J7jNVuW5cbNm3+A6HjCpe41SMiSLy2jFXu2t+4ExTnPWhU5OTmMRnY0lJgdRG4sTdnDDj6lRS0YlXcMoRb8iiJinXSNiAaj2c7OWIjP9+aKM+wSZmuALOLx3XfPEqpSeMyM8BjPou08iQqnKEEI37C13ntRPTk5YX5rjJeGw6b2hqW55ZMhZdufQ+TCkmVLoJPDyWE+UEVuHBhxL8/8TqhuJSPkwwRFew8hhwIA1vv5cZlrNK3AjiMSLA/yUKeFEBYNL8fhzVpht2CNWRBInWoMXqJSFOlOlFpLzrU7y6LClcFoEAG4+eEwz9PMJenJeyASx5B50FUOR4We/x/Eot/t+kM/+ZOkAa+lLO4O1VJ670Sqc0PQxoGKi2X9WScXJAAImjU3r6XSZaPrGwxvGV4pKWBMloIK0ABSSTmmaTqux752iWZ/sel9i98W2zg98gJAjCSjJzQHkMXM2DuEsN7X1qdIzbhAeouCBN9VKWk0rxcIqErJSsvW4UrERO8qWqdKra4qljz1ogozQsrp4sfJpqIOQaF+nmvt01QnZo8cbq4iPaVXBK1ZjIS9V2haOIY+TNvdzd1YV4BgkxPAzKeRJS8KCNPuGVqI2cs5dMaTYuSGLsF1QL1da+X4LRcGaQ6VLS7h47bYOc8urtAVy9hMO/ksrTUtEdn3TOJHMlwVwK2bt1588cU/EOBHr5du39ZcXxEcmImzXoDVbfVtLRJcId26CstaDBBlObtljCZZoHwH/4BI2YpefPwfG+rS/uzaFTrX+Ww5a60dEEsoKirFeg/LttR65QrjXu3sHW+rJD4svit4J90TilvCsWL4rfYeBFIDJRnPm59wd+tdwFisurfIHps7umTmGY5RksFPDpUjyPIDEFbkkdw2ahqRBLfN8yx3797lc1MY8+ZU70bdGABjEXeL6BrgHsy0CHvDNRlDOCkELUbyTdTzLBuCy6XdcGq8T7xpg8IlLicB2AJFUoUMJ3kzhqPHlgRPYgzGzB48eHA8Xjz77LOHk5PIBYwhm7tkkJN+ddLuOZwY1499/GN8KmS2k9mynlUmnoDwxKPLMEwkm2zRPRpmyzbenM/okGiBMOPwXcB2AQA6m/qN1DeABA9qwl1HkoOUI8/92HMEmY8rDk5Bb31kF9bWCgH3noC2DfTPLWhcrG7Gspvj+fmb3/kOBgYwQibh3TWzY2YlM7bLhp0yzLX0YiyFLaggHD6VaIVj5ipy5eQEvddRDDx2vIqF5erHi+P9+/fL4fDMs89cXmJO0ck1US2lqoiZLecPfD3udvXG+PMh7mqVARYc+9azNMDTYM5dHbGncRbud3XvvXY3CbobRua9e6jBKChQXddVomd3HpOM8Y2nTxdlk34JYluP7Jdx61tw+nGUXNKQt5SN3RE7vgCwnua3QsvEZUGg6Le4gnvgokOXxl+d5yJjDT0oiJUuu8HYWyVi4skq2HqHbwDaddmAexIJD9/iax7Q+ZEDHK9EzVNEdxyBYcizYLNmYqnCETDv6B4RuzgLsEvObdKYBVjdUVk+js3H4TLzy/bhQwMku1XxqqUMNklPMyp+jSEnsiHkEMf79+ei7uz04+7BgMMpKCpXtJ61FcmUFJVy2zniktoPrA0UsNQ8k4l0T72qTO42SPMwdhmKs4LQzUxqXVR7W9d1nWud2YNTpLkfVV1LOcwarbSKutlyXNcjIOZWvIgyyhKDVZFBXxFqUGiH54GBzCHxsNttacrbtsq5HOQecQmR5nJHdX3Y3TrKp5HxX4pYFFcjs9gCoFTvFgQ6mcgSkVJ0lDllpivY20ew3nMhRKQtC7dad++tTdPEHRSG0JYdCi03XpJYIBeVOGU2WyLfr1KU3b07KGUqqtI7BkA0pde7dVHFyDPFNxSg8+NMumspEGiQS+PSkNNwC7uNQ16WJQTYXVT72pkYMKCt6zxPEi1juFFpiu+AO459xMKza9Q2ZCfjMBlDw2Ls1s297th/5WH5dcSQ3WyvEGS3Ur2biml5SAk/vCzbrUUle/ZGWHh8YettOS5Xrlzp7uuyTNN0Ok9aSr1y5Z0333xwvOBtp8NJae24LAoRz+Y1maHt+yGnJ+mAKNjjM1xfLpuKmMJxYCAj8yjd+jxNS1tJWg5BEZFSaZMf13VZm0BOpiAvn+p0fOYZuFUvCqiw2lFP5kNfzuhNx3SJDYOZonVxdsEht3WtUy2qhDFrasKhjBzRHDWOHb4eTfmwH3IIuqJkf9ysu43cSO8mkqLrzvS4qHjW5DBQUIpWSbQDD/LW2jRNJfs79SR8nqfJeyoZM6qIHAbMvagel8Xdp1pVo7ZbwCKG0TTMePpZLmGYK0q7KM5+7j8zj9i4094Il5Ut3QEARF84UosxvF5VNKHOEJSiZmitHY9LpApGFzzA3Nraaq1jyNZZSi7zYSKElUt1cXFx9erVaG6oQbW7LEt3nyMR9dCQY+rdMDZHhmS4xpxbTSpjEek9e0R4uGEAStHRXTlnxvJEd3b7KzXq2EMgMo66LEtr7RlEIDdPRdDNKA+LNHxX3QEgWbvj3CxFIHWalmVpZoUtXbtJrcQ0qOh0enJxdnZ+PEpyTbgIktIUIgazwWycfI4xLVDXLTLKBA0LWUSkONgFsJZyODn84M23FrQSbbIlzG8VdwXQlqW0hmlik7FoMSrSlnY4PZlUaQa31mFrLVVUPIwpoSEwkFu11t6ttTbYc4RrK8nvA+m9A1G1Szjj7p1Ci9CjlNX4caKsFGoSNXOSiR0mvDIctjc3zDsd/uEooRaplakj6nzHNE21lhRdd/c6kb8ntgdjtPyyQJC4T9Nko0igqBnlE9FMWTLDxHCEiHRncLj1zgrkKJVkSGNnwg2VZRbAgM2YFFkbm02glko8apx/7qK05qQ1Y4Goqp6enhZWu6V2gpNbRwF0j4xxAPIR5nzWYcva1lorDd2pRleaKiKlbBCX6AqZXgOiEhqAGiBycVzSnAZpWaL0LqUrbUfC94QMBzsp87Wtkhz3VVUnGrUb+wyHRp8W85wiIeMOtFH3s
tojFX/pklxoK4paq/TOpIiqGvD2+flF7xA8M80Qubi4EGXyQMisQgxwHFWq3axmJbfvqI9l931w6O7A4kodlyMg8+HQeu9n53P4MlxoEZXujuattXmeD8siAQpUMGjU+3y+TKXK2nyaII6z8+PF/dItKXHCyKcOh8cZp0XneTbSeog4gq9LRaBCugspEqY0gxDK8htcLMuANytphtIGthx1ntcZsfPtCOu9r23l85AoLuxZD2izirZmNfZdGP+8kXia/qqqUIiTvjqjW+NJUtPm92+NzgiNiFfBuASf1fIwQAK4p4iYCw8tzh19Nmq/JO6KQVIFDZIxOLy4QINewKzWGgqWykGVUYFpmlQ3/xDpmwgJg5rDs/shKW7Dqw/xXJdVyfHLUGRCZ9u6kNWMzk64xQi6Pw6Z1aeR3BORXV8Ivj4c4yGH5layKZZuJDV0Aqykb9bNxNF7ZzmKZFl1Ui8E+cNIm+49ZGTIYS+9YxvRI+D/EPoO9vJZV+v9/OzsB22l0TgRirTV4rK9VZB+jxFNtIBEIrUBt27I2qDxMOMxNs9I9co0n5+fa61M9Q/IYC1FIQ1o1gcrnSUo+KOnp93sCC9XTo8u6N0N1q32VbshFtotso9bAgOGnm3MgmKxtW7mhDnx/BUAri4MKMDDjedVVel/jCCrDPJz62OpQ/QMKBGwCGtORETNjKTOTIOS6XOQTAGouyhouCuBSUz5kYhm95EtyyCRjdrkTGBR6UawhEaqmY8aCd4onNsUe601mAkgoYmCx0fox9Iw1t0OCxs+bukMPe0sWGUIagSxzKy1dRwB3UwTdhYhCsJbOVT6nxCgQPo2O46zs7P55KA7k5i6URMEsL03s40AtKhQfh1I15SMAslAkjuF5akQJPXPiMaHGRapUxEBG/Mh+CRQyO+dEY7hPm1Qit7YHS5Hk6r+UV9YBtvZJsmaytDcu4ibLesSRifQWqvBMpOj1sg4Dp9cgnhxMxOQYeuIvcN77+7QnLpxnnTIuizIz3IOohvTPJ+4u8g7FxeeLU7cvWi5cvUKRI/H49lU8fabq1mZD2t7pwrOjkeJjAMgWsSZH2JMFTzTe8NgUAYsrAaXhyZMokEIALL8cD6dQVyCTx4edRoa8UqQ725hC8StUApjNFtkA24hoRkmCBIQt3QW3HYw461eH2Eixq2Z+D3MBy3q7tYiJj50L49Vcx9FgpKhqcCjuXumcPdbP6SXsQaRqHiOBgue+zf6Vrr7NFVCvUFeYlUH+CepASLtvbfWPR25AWbPgNM2agSEfQBCgFSkVCr3337nox/9qBaFe2/m7kIw5s5794C8xZP3cAgZ03F3L8kenNt0N2oHBAPiQgkMZ49kxQGjqwMqoBFkJKjQCe2NPZdNp0bCD/mcvveWcUmEAbqN+e7eW60HLUpEHpVJ78FMyoPs8qitqxAZEI8n45Qe4ssXE6hPs053xCzbIwNHs5PKAgGBoBjmeT5M05v375+7I4tqVFVULx48WE5O3nJ/y70VxZHYLe/Ho6q0AS3GOL1yT3osIQDrRmwJFLYYvQbVrCQflNHRODFrDMN0FLcc9SNzq6KQUZNIqIk+Omoui9LvjcmLJ+daq2rlUpVa2tIARhysRAEt4OxHnAGZiICJiEzTxK3OhI3L+Iukre0DCzoiBLxn+JzJcbUbmog4RMOScoc4QSBxSOfmW9Y1g7euhW1p47Ckg7lR5qe5NR8OLPum3qMeZpMbM4h01TLoC20UR7gPb01Up2ka4SXu43AndnrS8tLMVG2jJl/fwwLjAeZTz1G7m7JdqioE3Trj8MSHQ9Bb16luJh8gIrVWht/GLAk1kohkdQqfW8MvBQllHl6CSBeN5RaReT4AkToaimJMQhE0eMl0ZSpJyjaT24XU37IJDMKO2I0acC3CNu4QmJsijrDWWlHGQTI6ILK2tvS+qJj5FcFHrl7py4Jay9WrZ8vSzOhBigpb8ZRS1nU1KNBVRUtlJhDIAsAwLWOT0yWh2pAMnFIhco9tR+EQ4FhQh2Oq1dILGH+KyRGM8kQe3EN1M6UkEDNv6wqJxkZDn/M+0zQRE1oZ+WTPVWISSq2RPx6qjBvRRRjEyvbnUWYESJbdYhvV2J1Z9Bgmr2iJwPVAn8YBtovc0J5096qldYObaN1PRK2V54REqCtJMNnlGKLZp0aiqG2rdmIMfETO4ogpdM/imenlMsCGWtjc1c0enJ1du3aNsRkVIcdiRq2C7oPLGaYtc53mEo2qHcHLuwUtZFPFgeCjsK2tMTsFGvtwkrKll8iTkmwSPas4Jb88s/9mvfdaK7NTXG4m4nrvpTwkvTxza61QKR4+qru33quqqcEQIXaXdVkwYP1I/go+xKC/gkxa2NtP8igZ+UyquvSdHI6qpZmhe80ENc+LOk2qUiNPJiLBp7Xy4BYVyPH8oqoczZvqOnDOAHoEX1W11tozwsxqOwrlkHAzU4W1zQNiCwKJQEnuKgy0U4yZRhRHzUiKjUUdkcVtk2dgHnD3IgV0Q3QryKOO3C0NzSga7OEDKlDNvW5WNdjdABkoG/56DLNF2Hl4RO7G6moZ8cNEW0aSU/joRhsy97GFaZriHro5YR5DlUGKu482GduipqTl18UFoHcbhxHVF0kLaq0QmJls9aLh0I+jZIRQygYCLczCMWksIhcXF1euXrFu67qWWjyiVrHVYgkFlCUefGxEPNKqxiZtCEdjrLokRG5Dtng6HT7+15n2gGSQCfSjgkXZeifYII5RiRI2c4+saZp8iC2E/cRScrbvEoF7LVuYorW2tpUUSzVPorqzSpjz4E5g1VhzqyLNfUTjNtIq906DLUIbJaMjl5Zb2HSsZbSV/07A0k1VH5i9s64c+NqapO7Ze3M0Xobm3J+hl5bb1dUTKZTmxrquiLMJhV6b6C6IG6ewIrMG+TXdLPsv7MPAG7+HmUXtuid5Tx4Mm022c3Rk52VUOrTdugiKFgvyXsDg2ZyGn2c4Ee6aviXg60owYxmnQq5LrujYx6GzZGS3dDReQe4lD+dzfHw8KH+XLGNiPI2zkpngUEnIHi7BV+be1rWosmdyCE/cvEfjmaK9d0+HzW0M3BjeUlW36LEalLTdcnS2bTiBEQjgEATCRsmE7DlwEX3o6Ikn9x3ij5OpqsO35OrEwJmiy/q7zFeFCUCXm2s/Al6+3TvUbO9dVdzEMtQ/jnnkvaEQFdg+P5w4HAYLEcaCxKEc53izwO5zjAcA8FkVAHsCrIZpUwx5ctfaSE5MiXVvnt6ZOyBTLdb7mFgelFWkp9syWI9VtbUWrgc3Gw09c7OuBMw1l0wWcokl4nCWsrYJtkc0pykZU9O5ZVJbRGjtdzMpAQF07iQAiQ99KPa1P6FUWVm5rVEGI5HHbHqfnOvtfPj/AuF8Ss4CuruhAAAAAElFTkSuQmCC\n",
- "text/plain": [
- ""
- ]
- },
- "execution_count": 7,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "physics = mujoco.Physics.from_xml_string(contents)\n",
- "# Visualize the joint axis.\n",
- "scene_option = mujoco.wrapper.core.MjvOption()\n",
- "scene_option.flags[enums.mjtVisFlag.mjVIS_JOINT] = True\n",
- "pixels = physics.render(scene_option=scene_option)\n",
- "PIL.Image.fromarray(pixels)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/home/avnish/.local/share/virtualenvs/metaworld-7kyDgMie/lib/python3.7/site-packages/ipykernel/ipkernel.py:287: DeprecationWarning: `should_run_async` will not call `transform_cell` automatically in the future. Please pass the result to `transformed_cell` argument and any exception that happen during thetransform in `preprocessing_exc_tuple` in IPython 7.17 and above.\n",
- " and should_run_async(code)\n"
- ]
- },
- {
- "data": {
- "text/html": [
- ""
- ],
- "text/plain": [
- ""
- ]
- },
- "execution_count": 9,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "duration = 20 # (seconds)\n",
- "framerate = 30 # (Hz)\n",
- "\n",
- "# Visualize the joint axis\n",
- "scene_option = mujoco.wrapper.core.MjvOption()\n",
- "scene_option.flags[enums.mjtVisFlag.mjVIS_JOINT] = True\n",
- "\n",
- "# Simulate and display video.\n",
- "frames = []\n",
- "physics.reset() # Reset state and time\n",
- "while physics.data.time < duration:\n",
- " physics.step()\n",
- " if len(frames) < physics.data.time * framerate:\n",
- " pixels = physics.render(scene_option=scene_option)\n",
- " frames.append(pixels)\n",
- "display_video(frames, framerate)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.7.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 4
-}
diff --git a/metaworld/envs/assets_v1/multiobject_models/generate_touch_sensors.py b/metaworld/envs/assets_v1/multiobject_models/generate_touch_sensors.py
index aa0aefb4d..eff5b2812 100644
--- a/metaworld/envs/assets_v1/multiobject_models/generate_touch_sensors.py
+++ b/metaworld/envs/assets_v1/multiobject_models/generate_touch_sensors.py
@@ -44,7 +44,7 @@
f = open("touchsensor.xml", "wb")
-f.write(xml_str)
+f.write(xml_str.encode("utf-8"))
f.close()
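This fix is needed because the file is opened in binary mode: on Python 3, writing a `str` to a `"wb"` handle raises `TypeError`. A minimal sketch of the behavior, with a stand-in value for `xml_str`:

```python
# Minimal sketch: binary-mode file handles accept bytes, not str (Python 3).
xml_str = "<touchsensor/>"  # stand-in for the generated XML string

with open("touchsensor.xml", "wb") as f:
    # f.write(xml_str)              # TypeError: a bytes-like object is required
    f.write(xml_str.encode("utf-8"))  # encode to bytes first, as in the patch
```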
diff --git a/metaworld/envs/assets_v2/objects/assets/shelf_dependencies.xml b/metaworld/envs/assets_v2/objects/assets/shelf_dependencies.xml
index eb71a08d0..dd0ace852 100644
--- a/metaworld/envs/assets_v2/objects/assets/shelf_dependencies.xml
+++ b/metaworld/envs/assets_v2/objects/assets/shelf_dependencies.xml
@@ -21,7 +21,7 @@
-
+
diff --git a/metaworld/envs/assets_v2/objects/assets/soccer_ball.xml b/metaworld/envs/assets_v2/objects/assets/soccer_ball.xml
index adeddc0ee..2e8da6925 100644
--- a/metaworld/envs/assets_v2/objects/assets/soccer_ball.xml
+++ b/metaworld/envs/assets_v2/objects/assets/soccer_ball.xml
@@ -2,6 +2,6 @@
-
+
diff --git a/metaworld/envs/assets_v2/objects/assets/stick.xml b/metaworld/envs/assets_v2/objects/assets/stick.xml
index 56dbe7622..1ec99224f 100644
--- a/metaworld/envs/assets_v2/objects/assets/stick.xml
+++ b/metaworld/envs/assets_v2/objects/assets/stick.xml
@@ -1,7 +1,7 @@
-
+
diff --git a/metaworld/envs/assets_v2/sawyer_xyz/sawyer_basketball.xml b/metaworld/envs/assets_v2/sawyer_xyz/sawyer_basketball.xml
index 3997fad41..7f195c010 100644
--- a/metaworld/envs/assets_v2/sawyer_xyz/sawyer_basketball.xml
+++ b/metaworld/envs/assets_v2/sawyer_xyz/sawyer_basketball.xml
@@ -5,11 +5,15 @@
+
+
-
diff --git a/metaworld/envs/mujoco/env_dict.py b/metaworld/envs/mujoco/env_dict.py
index 99cbc53a9..aabdf73fa 100644
--- a/metaworld/envs/mujoco/env_dict.py
+++ b/metaworld/envs/mujoco/env_dict.py
@@ -1,379 +1,149 @@
+"""Dictionaries mapping environment name strings to environment classes,
+and organising them into various collections and splits for the benchmarks."""
+
+from __future__ import annotations
+
import re
from collections import OrderedDict
+from typing import Dict, List, Literal
+from typing import OrderedDict as Typing_OrderedDict
+from typing import Sequence, Union
import numpy as np
+from typing_extensions import TypeAlias
-from metaworld.envs.mujoco.sawyer_xyz.v2 import (
- SawyerBasketballEnvV2,
- SawyerBinPickingEnvV2,
- SawyerBoxCloseEnvV2,
- SawyerButtonPressEnvV2,
- SawyerButtonPressTopdownEnvV2,
- SawyerButtonPressTopdownWallEnvV2,
- SawyerButtonPressWallEnvV2,
- SawyerCoffeeButtonEnvV2,
- SawyerCoffeePullEnvV2,
- SawyerCoffeePushEnvV2,
- SawyerDialTurnEnvV2,
- SawyerDoorCloseEnvV2,
- SawyerDoorEnvV2,
- SawyerDoorLockEnvV2,
- SawyerDoorUnlockEnvV2,
- SawyerDrawerCloseEnvV2,
- SawyerDrawerOpenEnvV2,
- SawyerFaucetCloseEnvV2,
- SawyerFaucetOpenEnvV2,
- SawyerHammerEnvV2,
- SawyerHandInsertEnvV2,
- SawyerHandlePressEnvV2,
- SawyerHandlePressSideEnvV2,
- SawyerHandlePullEnvV2,
- SawyerHandlePullSideEnvV2,
- SawyerLeverPullEnvV2,
- SawyerNutAssemblyEnvV2,
- SawyerNutDisassembleEnvV2,
- SawyerPegInsertionSideEnvV2,
- SawyerPegUnplugSideEnvV2,
- SawyerPickOutOfHoleEnvV2,
- SawyerPickPlaceEnvV2,
- SawyerPickPlaceWallEnvV2,
- SawyerPlateSlideBackEnvV2,
- SawyerPlateSlideBackSideEnvV2,
- SawyerPlateSlideEnvV2,
- SawyerPlateSlideSideEnvV2,
- SawyerPushBackEnvV2,
- SawyerPushEnvV2,
- SawyerPushWallEnvV2,
- SawyerReachEnvV2,
- SawyerReachWallEnvV2,
- SawyerShelfPlaceEnvV2,
- SawyerSoccerEnvV2,
- SawyerStickPullEnvV2,
- SawyerStickPushEnvV2,
- SawyerSweepEnvV2,
- SawyerSweepIntoGoalEnvV2,
- SawyerWindowCloseEnvV2,
- SawyerWindowOpenEnvV2,
-)
-
-ALL_V2_ENVIRONMENTS = OrderedDict(
- (
- ("assembly-v2", SawyerNutAssemblyEnvV2),
- ("basketball-v2", SawyerBasketballEnvV2),
- ("bin-picking-v2", SawyerBinPickingEnvV2),
- ("box-close-v2", SawyerBoxCloseEnvV2),
- ("button-press-topdown-v2", SawyerButtonPressTopdownEnvV2),
- ("button-press-topdown-wall-v2", SawyerButtonPressTopdownWallEnvV2),
- ("button-press-v2", SawyerButtonPressEnvV2),
- ("button-press-wall-v2", SawyerButtonPressWallEnvV2),
- ("coffee-button-v2", SawyerCoffeeButtonEnvV2),
- ("coffee-pull-v2", SawyerCoffeePullEnvV2),
- ("coffee-push-v2", SawyerCoffeePushEnvV2),
- ("dial-turn-v2", SawyerDialTurnEnvV2),
- ("disassemble-v2", SawyerNutDisassembleEnvV2),
- ("door-close-v2", SawyerDoorCloseEnvV2),
- ("door-lock-v2", SawyerDoorLockEnvV2),
- ("door-open-v2", SawyerDoorEnvV2),
- ("door-unlock-v2", SawyerDoorUnlockEnvV2),
- ("hand-insert-v2", SawyerHandInsertEnvV2),
- ("drawer-close-v2", SawyerDrawerCloseEnvV2),
- ("drawer-open-v2", SawyerDrawerOpenEnvV2),
- ("faucet-open-v2", SawyerFaucetOpenEnvV2),
- ("faucet-close-v2", SawyerFaucetCloseEnvV2),
- ("hammer-v2", SawyerHammerEnvV2),
- ("handle-press-side-v2", SawyerHandlePressSideEnvV2),
- ("handle-press-v2", SawyerHandlePressEnvV2),
- ("handle-pull-side-v2", SawyerHandlePullSideEnvV2),
- ("handle-pull-v2", SawyerHandlePullEnvV2),
- ("lever-pull-v2", SawyerLeverPullEnvV2),
- ("peg-insert-side-v2", SawyerPegInsertionSideEnvV2),
- ("pick-place-wall-v2", SawyerPickPlaceWallEnvV2),
- ("pick-out-of-hole-v2", SawyerPickOutOfHoleEnvV2),
- ("reach-v2", SawyerReachEnvV2),
- ("push-back-v2", SawyerPushBackEnvV2),
- ("push-v2", SawyerPushEnvV2),
- ("pick-place-v2", SawyerPickPlaceEnvV2),
- ("plate-slide-v2", SawyerPlateSlideEnvV2),
- ("plate-slide-side-v2", SawyerPlateSlideSideEnvV2),
- ("plate-slide-back-v2", SawyerPlateSlideBackEnvV2),
- ("plate-slide-back-side-v2", SawyerPlateSlideBackSideEnvV2),
- ("peg-insert-side-v2", SawyerPegInsertionSideEnvV2),
- ("peg-unplug-side-v2", SawyerPegUnplugSideEnvV2),
- ("soccer-v2", SawyerSoccerEnvV2),
- ("stick-push-v2", SawyerStickPushEnvV2),
- ("stick-pull-v2", SawyerStickPullEnvV2),
- ("push-wall-v2", SawyerPushWallEnvV2),
- ("push-v2", SawyerPushEnvV2),
- ("reach-wall-v2", SawyerReachWallEnvV2),
- ("reach-v2", SawyerReachEnvV2),
- ("shelf-place-v2", SawyerShelfPlaceEnvV2),
- ("sweep-into-v2", SawyerSweepIntoGoalEnvV2),
- ("sweep-v2", SawyerSweepEnvV2),
- ("window-open-v2", SawyerWindowOpenEnvV2),
- ("window-close-v2", SawyerWindowCloseEnvV2),
- )
-)
+from metaworld.envs.mujoco.sawyer_xyz import SawyerXYZEnv, v2
+# Utils
-_NUM_METAWORLD_ENVS = len(ALL_V2_ENVIRONMENTS)
-# V2 DICTS
-
-MT10_V2 = OrderedDict(
- (
- ("reach-v2", SawyerReachEnvV2),
- ("push-v2", SawyerPushEnvV2),
- ("pick-place-v2", SawyerPickPlaceEnvV2),
- ("door-open-v2", SawyerDoorEnvV2),
- ("drawer-open-v2", SawyerDrawerOpenEnvV2),
- ("drawer-close-v2", SawyerDrawerCloseEnvV2),
- ("button-press-topdown-v2", SawyerButtonPressTopdownEnvV2),
- ("peg-insert-side-v2", SawyerPegInsertionSideEnvV2),
- ("window-open-v2", SawyerWindowOpenEnvV2),
- ("window-close-v2", SawyerWindowCloseEnvV2),
- ),
+EnvDict: TypeAlias = "Typing_OrderedDict[str, type[SawyerXYZEnv]]"
+TrainTestEnvDict: TypeAlias = "Typing_OrderedDict[Literal['train', 'test'], EnvDict]"
+EnvArgsKwargsDict: TypeAlias = (
+ "Dict[str, Dict[Literal['args', 'kwargs'], Union[List, Dict]]]"
)
-
-MT10_V2_ARGS_KWARGS = {
- key: dict(args=[], kwargs={"task_id": list(ALL_V2_ENVIRONMENTS.keys()).index(key)})
- for key, _ in MT10_V2.items()
+ENV_CLS_MAP = {
+ "assembly-v2": v2.SawyerNutAssemblyEnvV2,
+ "basketball-v2": v2.SawyerBasketballEnvV2,
+ "bin-picking-v2": v2.SawyerBinPickingEnvV2,
+ "box-close-v2": v2.SawyerBoxCloseEnvV2,
+ "button-press-topdown-v2": v2.SawyerButtonPressTopdownEnvV2,
+ "button-press-topdown-wall-v2": v2.SawyerButtonPressTopdownWallEnvV2,
+ "button-press-v2": v2.SawyerButtonPressEnvV2,
+ "button-press-wall-v2": v2.SawyerButtonPressWallEnvV2,
+ "coffee-button-v2": v2.SawyerCoffeeButtonEnvV2,
+ "coffee-pull-v2": v2.SawyerCoffeePullEnvV2,
+ "coffee-push-v2": v2.SawyerCoffeePushEnvV2,
+ "dial-turn-v2": v2.SawyerDialTurnEnvV2,
+ "disassemble-v2": v2.SawyerNutDisassembleEnvV2,
+ "door-close-v2": v2.SawyerDoorCloseEnvV2,
+ "door-lock-v2": v2.SawyerDoorLockEnvV2,
+ "door-open-v2": v2.SawyerDoorEnvV2,
+ "door-unlock-v2": v2.SawyerDoorUnlockEnvV2,
+ "hand-insert-v2": v2.SawyerHandInsertEnvV2,
+ "drawer-close-v2": v2.SawyerDrawerCloseEnvV2,
+ "drawer-open-v2": v2.SawyerDrawerOpenEnvV2,
+ "faucet-open-v2": v2.SawyerFaucetOpenEnvV2,
+ "faucet-close-v2": v2.SawyerFaucetCloseEnvV2,
+ "hammer-v2": v2.SawyerHammerEnvV2,
+ "handle-press-side-v2": v2.SawyerHandlePressSideEnvV2,
+ "handle-press-v2": v2.SawyerHandlePressEnvV2,
+ "handle-pull-side-v2": v2.SawyerHandlePullSideEnvV2,
+ "handle-pull-v2": v2.SawyerHandlePullEnvV2,
+ "lever-pull-v2": v2.SawyerLeverPullEnvV2,
+ "peg-insert-side-v2": v2.SawyerPegInsertionSideEnvV2,
+ "pick-place-wall-v2": v2.SawyerPickPlaceWallEnvV2,
+ "pick-out-of-hole-v2": v2.SawyerPickOutOfHoleEnvV2,
+ "reach-v2": v2.SawyerReachEnvV2,
+ "push-back-v2": v2.SawyerPushBackEnvV2,
+ "push-v2": v2.SawyerPushEnvV2,
+ "pick-place-v2": v2.SawyerPickPlaceEnvV2,
+ "plate-slide-v2": v2.SawyerPlateSlideEnvV2,
+ "plate-slide-side-v2": v2.SawyerPlateSlideSideEnvV2,
+ "plate-slide-back-v2": v2.SawyerPlateSlideBackEnvV2,
+ "plate-slide-back-side-v2": v2.SawyerPlateSlideBackSideEnvV2,
+ "peg-unplug-side-v2": v2.SawyerPegUnplugSideEnvV2,
+ "soccer-v2": v2.SawyerSoccerEnvV2,
+ "stick-push-v2": v2.SawyerStickPushEnvV2,
+ "stick-pull-v2": v2.SawyerStickPullEnvV2,
+ "push-wall-v2": v2.SawyerPushWallEnvV2,
+ "reach-wall-v2": v2.SawyerReachWallEnvV2,
+ "shelf-place-v2": v2.SawyerShelfPlaceEnvV2,
+ "sweep-into-v2": v2.SawyerSweepIntoGoalEnvV2,
+ "sweep-v2": v2.SawyerSweepEnvV2,
+ "window-open-v2": v2.SawyerWindowOpenEnvV2,
+ "window-close-v2": v2.SawyerWindowCloseEnvV2,
}
-ML10_V2 = OrderedDict(
- (
- (
- "train",
- OrderedDict(
- (
- ("reach-v2", SawyerReachEnvV2),
- ("push-v2", SawyerPushEnvV2),
- ("pick-place-v2", SawyerPickPlaceEnvV2),
- ("door-open-v2", SawyerDoorEnvV2),
- ("drawer-close-v2", SawyerDrawerCloseEnvV2),
- ("button-press-topdown-v2", SawyerButtonPressTopdownEnvV2),
- ("peg-insert-side-v2", SawyerPegInsertionSideEnvV2),
- ("window-open-v2", SawyerWindowOpenEnvV2),
- ("sweep-v2", SawyerSweepEnvV2),
- ("basketball-v2", SawyerBasketballEnvV2),
- )
- ),
- ),
- (
- "test",
- OrderedDict(
- (
- ("drawer-open-v2", SawyerDrawerOpenEnvV2),
- ("door-close-v2", SawyerDoorCloseEnvV2),
- ("shelf-place-v2", SawyerShelfPlaceEnvV2),
- ("sweep-into-v2", SawyerSweepIntoGoalEnvV2),
- (
- "lever-pull-v2",
- SawyerLeverPullEnvV2,
- ),
- )
- ),
- ),
- )
-)
-
-ml10_train_args_kwargs = {
- key: dict(
- args=[],
- kwargs={
- "task_id": list(ALL_V2_ENVIRONMENTS.keys()).index(key),
- },
- )
- for key, _ in ML10_V2["train"].items()
-}
+def _get_env_dict(env_names: Sequence[str]) -> EnvDict:
+ """Returns an `OrderedDict` containing `(env_name, env_cls)` tuples for the given env_names.
-ml10_test_args_kwargs = {
- key: dict(args=[], kwargs={"task_id": list(ALL_V2_ENVIRONMENTS.keys()).index(key)})
- for key, _ in ML10_V2["test"].items()
-}
+ Args:
+ env_names: The environment names.
-ML10_ARGS_KWARGS = dict(
- train=ml10_train_args_kwargs,
- test=ml10_test_args_kwargs,
-)
+ Returns:
+ The appropriate `OrderedDict`.
+ """
+ return OrderedDict([(env_name, ENV_CLS_MAP[env_name]) for env_name in env_names])
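As an illustration, a hedged sketch of what `_get_env_dict` produces; the point is that insertion order follows the requested names and each value comes straight from `ENV_CLS_MAP`:

```python
# Illustrative sketch (not part of the module): order is preserved and
# each name resolves through ENV_CLS_MAP.
subset = _get_env_dict(["reach-v2", "push-v2"])
assert list(subset.keys()) == ["reach-v2", "push-v2"]
assert subset["reach-v2"] is ENV_CLS_MAP["reach-v2"]
```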
-ML1_V2 = OrderedDict((("train", ALL_V2_ENVIRONMENTS), ("test", ALL_V2_ENVIRONMENTS)))
-ML1_args_kwargs = {
- key: dict(
- args=[],
- kwargs={
- "task_id": list(ALL_V2_ENVIRONMENTS.keys()).index(key),
- },
- )
- for key, _ in ML1_V2["train"].items()
-}
-MT50_V2 = OrderedDict(
- (
- ("assembly-v2", SawyerNutAssemblyEnvV2),
- ("basketball-v2", SawyerBasketballEnvV2),
- ("bin-picking-v2", SawyerBinPickingEnvV2),
- ("box-close-v2", SawyerBoxCloseEnvV2),
- ("button-press-topdown-v2", SawyerButtonPressTopdownEnvV2),
- ("button-press-topdown-wall-v2", SawyerButtonPressTopdownWallEnvV2),
- ("button-press-v2", SawyerButtonPressEnvV2),
- ("button-press-wall-v2", SawyerButtonPressWallEnvV2),
- ("coffee-button-v2", SawyerCoffeeButtonEnvV2),
- ("coffee-pull-v2", SawyerCoffeePullEnvV2),
- ("coffee-push-v2", SawyerCoffeePushEnvV2),
- ("dial-turn-v2", SawyerDialTurnEnvV2),
- ("disassemble-v2", SawyerNutDisassembleEnvV2),
- ("door-close-v2", SawyerDoorCloseEnvV2),
- ("door-lock-v2", SawyerDoorLockEnvV2),
- ("door-open-v2", SawyerDoorEnvV2),
- ("door-unlock-v2", SawyerDoorUnlockEnvV2),
- ("hand-insert-v2", SawyerHandInsertEnvV2),
- ("drawer-close-v2", SawyerDrawerCloseEnvV2),
- ("drawer-open-v2", SawyerDrawerOpenEnvV2),
- ("faucet-open-v2", SawyerFaucetOpenEnvV2),
- ("faucet-close-v2", SawyerFaucetCloseEnvV2),
- ("hammer-v2", SawyerHammerEnvV2),
- ("handle-press-side-v2", SawyerHandlePressSideEnvV2),
- ("handle-press-v2", SawyerHandlePressEnvV2),
- ("handle-pull-side-v2", SawyerHandlePullSideEnvV2),
- ("handle-pull-v2", SawyerHandlePullEnvV2),
- ("lever-pull-v2", SawyerLeverPullEnvV2),
- ("peg-insert-side-v2", SawyerPegInsertionSideEnvV2),
- ("pick-place-wall-v2", SawyerPickPlaceWallEnvV2),
- ("pick-out-of-hole-v2", SawyerPickOutOfHoleEnvV2),
- ("reach-v2", SawyerReachEnvV2),
- ("push-back-v2", SawyerPushBackEnvV2),
- ("push-v2", SawyerPushEnvV2),
- ("pick-place-v2", SawyerPickPlaceEnvV2),
- ("plate-slide-v2", SawyerPlateSlideEnvV2),
- ("plate-slide-side-v2", SawyerPlateSlideSideEnvV2),
- ("plate-slide-back-v2", SawyerPlateSlideBackEnvV2),
- ("plate-slide-back-side-v2", SawyerPlateSlideBackSideEnvV2),
- ("peg-insert-side-v2", SawyerPegInsertionSideEnvV2),
- ("peg-unplug-side-v2", SawyerPegUnplugSideEnvV2),
- ("soccer-v2", SawyerSoccerEnvV2),
- ("stick-push-v2", SawyerStickPushEnvV2),
- ("stick-pull-v2", SawyerStickPullEnvV2),
- ("push-wall-v2", SawyerPushWallEnvV2),
- ("push-v2", SawyerPushEnvV2),
- ("reach-wall-v2", SawyerReachWallEnvV2),
- ("reach-v2", SawyerReachEnvV2),
- ("shelf-place-v2", SawyerShelfPlaceEnvV2),
- ("sweep-into-v2", SawyerSweepIntoGoalEnvV2),
- ("sweep-v2", SawyerSweepEnvV2),
- ("window-open-v2", SawyerWindowOpenEnvV2),
- ("window-close-v2", SawyerWindowCloseEnvV2),
- )
-)
+def _get_train_test_env_dict(
+ train_env_names: Sequence[str], test_env_names: Sequence[str]
+) -> TrainTestEnvDict:
+ """Returns an `OrderedDict` containing two sub-keys ("train" and "test" at positions 0 and 1),
+ each containing the appropriate `OrderedDict` for the train and test classes of the benchmark.
-MT50_V2_ARGS_KWARGS = {
- key: dict(args=[], kwargs={"task_id": list(ALL_V2_ENVIRONMENTS.keys()).index(key)})
- for key, _ in MT50_V2.items()
-}
+ Args:
+ train_env_names: The train environment names.
+ test_env_names: The test environment names.
-ML45_V2 = OrderedDict(
- (
+ Returns:
+ The appropriate `OrderedDict`.
+ """
+ return OrderedDict(
(
- "train",
- OrderedDict(
- (
- ("assembly-v2", SawyerNutAssemblyEnvV2),
- ("basketball-v2", SawyerBasketballEnvV2),
- ("button-press-topdown-v2", SawyerButtonPressTopdownEnvV2),
- ("button-press-topdown-wall-v2", SawyerButtonPressTopdownWallEnvV2),
- ("button-press-v2", SawyerButtonPressEnvV2),
- ("button-press-wall-v2", SawyerButtonPressWallEnvV2),
- ("coffee-button-v2", SawyerCoffeeButtonEnvV2),
- ("coffee-pull-v2", SawyerCoffeePullEnvV2),
- ("coffee-push-v2", SawyerCoffeePushEnvV2),
- ("dial-turn-v2", SawyerDialTurnEnvV2),
- ("disassemble-v2", SawyerNutDisassembleEnvV2),
- ("door-close-v2", SawyerDoorCloseEnvV2),
- ("door-open-v2", SawyerDoorEnvV2),
- ("drawer-close-v2", SawyerDrawerCloseEnvV2),
- ("drawer-open-v2", SawyerDrawerOpenEnvV2),
- ("faucet-open-v2", SawyerFaucetOpenEnvV2),
- ("faucet-close-v2", SawyerFaucetCloseEnvV2),
- ("hammer-v2", SawyerHammerEnvV2),
- ("handle-press-side-v2", SawyerHandlePressSideEnvV2),
- ("handle-press-v2", SawyerHandlePressEnvV2),
- ("handle-pull-side-v2", SawyerHandlePullSideEnvV2),
- ("handle-pull-v2", SawyerHandlePullEnvV2),
- ("lever-pull-v2", SawyerLeverPullEnvV2),
- ("peg-insert-side-v2", SawyerPegInsertionSideEnvV2),
- ("pick-place-wall-v2", SawyerPickPlaceWallEnvV2),
- ("pick-out-of-hole-v2", SawyerPickOutOfHoleEnvV2),
- ("reach-v2", SawyerReachEnvV2),
- ("push-back-v2", SawyerPushBackEnvV2),
- ("push-v2", SawyerPushEnvV2),
- ("pick-place-v2", SawyerPickPlaceEnvV2),
- ("plate-slide-v2", SawyerPlateSlideEnvV2),
- ("plate-slide-side-v2", SawyerPlateSlideSideEnvV2),
- ("plate-slide-back-v2", SawyerPlateSlideBackEnvV2),
- ("plate-slide-back-side-v2", SawyerPlateSlideBackSideEnvV2),
- ("peg-insert-side-v2", SawyerPegInsertionSideEnvV2),
- ("peg-unplug-side-v2", SawyerPegUnplugSideEnvV2),
- ("soccer-v2", SawyerSoccerEnvV2),
- ("stick-push-v2", SawyerStickPushEnvV2),
- ("stick-pull-v2", SawyerStickPullEnvV2),
- ("push-wall-v2", SawyerPushWallEnvV2),
- ("push-v2", SawyerPushEnvV2),
- ("reach-wall-v2", SawyerReachWallEnvV2),
- ("reach-v2", SawyerReachEnvV2),
- ("shelf-place-v2", SawyerShelfPlaceEnvV2),
- ("sweep-into-v2", SawyerSweepIntoGoalEnvV2),
- ("sweep-v2", SawyerSweepEnvV2),
- ("window-open-v2", SawyerWindowOpenEnvV2),
- ("window-close-v2", SawyerWindowCloseEnvV2),
- )
- ),
- ),
- (
- "test",
- OrderedDict(
- (
- ("bin-picking-v2", SawyerBinPickingEnvV2),
- ("box-close-v2", SawyerBoxCloseEnvV2),
- ("hand-insert-v2", SawyerHandInsertEnvV2),
- ("door-lock-v2", SawyerDoorLockEnvV2),
- ("door-unlock-v2", SawyerDoorUnlockEnvV2),
- )
- ),
- ),
+ ("train", _get_env_dict(train_env_names)),
+ ("test", _get_env_dict(test_env_names)),
+ )
)
-)
-ml45_train_args_kwargs = {
- key: dict(
- args=[],
- kwargs={
- "task_id": list(ALL_V2_ENVIRONMENTS.keys()).index(key),
- },
- )
- for key, _ in ML45_V2["train"].items()
-}
-ml45_test_args_kwargs = {
- key: dict(args=[], kwargs={"task_id": list(ALL_V2_ENVIRONMENTS.keys()).index(key)})
- for key, _ in ML45_V2["test"].items()
-}
+def _get_args_kwargs(all_envs: EnvDict, env_subset: EnvDict) -> EnvArgsKwargsDict:
+ """Returns containing a `dict` of "args" and "kwargs" for each environment in a given list of environments.
+ Specifically, sets an empty "args" array and a "kwargs" dictionary with a "task_id" key for each env.
-ML45_ARGS_KWARGS = dict(
- train=ml45_train_args_kwargs,
- test=ml45_test_args_kwargs,
-)
+ Args:
+ all_envs: The full list of envs.
+ env_subset: The subset of envs to get args and kwargs for.
+
+ Returns:
+ The args and kwargs dictionary.
+ """
+ return {
+ key: dict(args=[], kwargs={"task_id": list(all_envs.keys()).index(key)})
+ for key, _ in env_subset.items()
+ }
+
+
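A sketch of the shape `_get_args_kwargs` returns, using the MT10 subset defined later in this module; each env's `task_id` is its index in the full env dict:

```python
# Illustrative sketch of the output shape:
#   {"reach-v2": {"args": [], "kwargs": {"task_id": <index in all_envs>}}, ...}
mt10_kwargs = _get_args_kwargs(ALL_V2_ENVIRONMENTS, MT10_V2)
expected_id = list(ALL_V2_ENVIRONMENTS.keys()).index("reach-v2")
assert mt10_kwargs["reach-v2"] == {"args": [], "kwargs": {"task_id": expected_id}}
```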
+def _create_hidden_goal_envs(all_envs: EnvDict) -> EnvDict:
+ """Create versions of the environments with the goal hidden.
+ Args:
+ all_envs: The full list of envs in the benchmark.
-def create_hidden_goal_envs():
+ Returns:
+ An `EnvDict` where the classes have been modified to hide the goal.
+ """
hidden_goal_envs = {}
- for env_name, env_cls in ALL_V2_ENVIRONMENTS.items():
+ for env_name, env_cls in all_envs.items():
d = {}
- def initialize(env, seed=None):
+ def initialize(env, seed=None, **render_kwargs):
if seed is not None:
st0 = np.random.get_state()
np.random.seed(seed)
- super(type(env), env).__init__()
+ super(type(env), env).__init__(**render_kwargs)
env._partially_observable = True
env._freeze_rand_vec = False
env._set_task_called = True
@@ -396,27 +166,33 @@ def initialize(env, seed=None):
return OrderedDict(hidden_goal_envs)
-def create_observable_goal_envs():
+def _create_observable_goal_envs(all_envs: EnvDict) -> EnvDict:
+ """Create versions of the environments with the goal observable.
+
+ Args:
+ all_envs: The full list of envs in the benchmark.
+
+ Returns:
+ An `EnvDict` where the classes have been modified to make the goal observable.
+ """
observable_goal_envs = {}
- for env_name, env_cls in ALL_V2_ENVIRONMENTS.items():
+ for env_name, env_cls in all_envs.items():
d = {}
- def initialize(env, seed=None, render_mode=None):
+ def initialize(env, seed=None, **render_kwargs):
if seed is not None:
st0 = np.random.get_state()
np.random.seed(seed)
- super(type(env), env).__init__()
-
+ super(type(env), env).__init__(**render_kwargs)
env._partially_observable = False
env._freeze_rand_vec = False
env._set_task_called = True
- env.render_mode = render_mode
env.reset()
env._freeze_rand_vec = True
if seed is not None:
env.seed(seed)
np.random.set_state(st0)
-
+
d["__init__"] = initialize
og_env_name = re.sub(
r"(^|[-])\s*([a-zA-Z])", lambda p: p.group(0).upper(), env_name
@@ -431,5 +207,178 @@ def initialize(env, seed=None, render_mode=None):
return OrderedDict(observable_goal_envs)
-ALL_V2_ENVIRONMENTS_GOAL_HIDDEN = create_hidden_goal_envs()
-ALL_V2_ENVIRONMENTS_GOAL_OBSERVABLE = create_observable_goal_envs()
+# V2 DICTS
+
+ALL_V2_ENVIRONMENTS = _get_env_dict(
+ [
+ "assembly-v2",
+ "basketball-v2",
+ "bin-picking-v2",
+ "box-close-v2",
+ "button-press-topdown-v2",
+ "button-press-topdown-wall-v2",
+ "button-press-v2",
+ "button-press-wall-v2",
+ "coffee-button-v2",
+ "coffee-pull-v2",
+ "coffee-push-v2",
+ "dial-turn-v2",
+ "disassemble-v2",
+ "door-close-v2",
+ "door-lock-v2",
+ "door-open-v2",
+ "door-unlock-v2",
+ "hand-insert-v2",
+ "drawer-close-v2",
+ "drawer-open-v2",
+ "faucet-open-v2",
+ "faucet-close-v2",
+ "hammer-v2",
+ "handle-press-side-v2",
+ "handle-press-v2",
+ "handle-pull-side-v2",
+ "handle-pull-v2",
+ "lever-pull-v2",
+ "pick-place-wall-v2",
+ "pick-out-of-hole-v2",
+ "pick-place-v2",
+ "plate-slide-v2",
+ "plate-slide-side-v2",
+ "plate-slide-back-v2",
+ "plate-slide-back-side-v2",
+ "peg-insert-side-v2",
+ "peg-unplug-side-v2",
+ "soccer-v2",
+ "stick-push-v2",
+ "stick-pull-v2",
+ "push-v2",
+ "push-wall-v2",
+ "push-back-v2",
+ "reach-v2",
+ "reach-wall-v2",
+ "shelf-place-v2",
+ "sweep-into-v2",
+ "sweep-v2",
+ "window-open-v2",
+ "window-close-v2",
+ ]
+)
+
+
+ALL_V2_ENVIRONMENTS_GOAL_HIDDEN = _create_hidden_goal_envs(ALL_V2_ENVIRONMENTS)
+ALL_V2_ENVIRONMENTS_GOAL_OBSERVABLE = _create_observable_goal_envs(ALL_V2_ENVIRONMENTS)
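These dicts hold dynamically created subclasses whose `__init__` accepts a seed, resets the env, and freezes the random goal vector. A hedged usage sketch; the key naming (`<env-name>-goal-observable`) is assumed to follow the `re.sub` renaming in `_create_observable_goal_envs`:

```python
# Hedged usage sketch: values are classes, so construct one to get an env.
env_cls = ALL_V2_ENVIRONMENTS_GOAL_OBSERVABLE["reach-v2-goal-observable"]
env = env_cls(seed=42)   # seeding here fixes the (visible) goal position
obs, info = env.reset()  # the goal components of obs are now populated
```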
+
+# MT Dicts
+
+MT10_V2 = _get_env_dict(
+ [
+ "reach-v2",
+ "push-v2",
+ "pick-place-v2",
+ "door-open-v2",
+ "drawer-open-v2",
+ "drawer-close-v2",
+ "button-press-topdown-v2",
+ "peg-insert-side-v2",
+ "window-open-v2",
+ "window-close-v2",
+ ]
+)
+MT10_V2_ARGS_KWARGS = _get_args_kwargs(ALL_V2_ENVIRONMENTS, MT10_V2)
+
+MT50_V2 = ALL_V2_ENVIRONMENTS
+MT50_V2_ARGS_KWARGS = _get_args_kwargs(ALL_V2_ENVIRONMENTS, MT50_V2)
+
+# ML Dicts
+
+ML1_V2 = _get_train_test_env_dict(
+ list(ALL_V2_ENVIRONMENTS.keys()), list(ALL_V2_ENVIRONMENTS.keys())
+)
+ML1_args_kwargs = _get_args_kwargs(ALL_V2_ENVIRONMENTS, ML1_V2["train"])
+
+ML10_V2 = _get_train_test_env_dict(
+ train_env_names=[
+ "reach-v2",
+ "push-v2",
+ "pick-place-v2",
+ "door-open-v2",
+ "drawer-close-v2",
+ "button-press-topdown-v2",
+ "peg-insert-side-v2",
+ "window-open-v2",
+ "sweep-v2",
+ "basketball-v2",
+ ],
+ test_env_names=[
+ "drawer-open-v2",
+ "door-close-v2",
+ "shelf-place-v2",
+ "sweep-into-v2",
+ "lever-pull-v2",
+ ],
+)
+ML10_ARGS_KWARGS = {
+ "train": _get_args_kwargs(ALL_V2_ENVIRONMENTS, ML10_V2["train"]),
+ "test": _get_args_kwargs(ALL_V2_ENVIRONMENTS, ML10_V2["test"]),
+}
+
+ML45_V2 = _get_train_test_env_dict(
+ train_env_names=[
+ "assembly-v2",
+ "basketball-v2",
+ "button-press-topdown-v2",
+ "button-press-topdown-wall-v2",
+ "button-press-v2",
+ "button-press-wall-v2",
+ "coffee-button-v2",
+ "coffee-pull-v2",
+ "coffee-push-v2",
+ "dial-turn-v2",
+ "disassemble-v2",
+ "door-close-v2",
+ "door-open-v2",
+ "drawer-close-v2",
+ "drawer-open-v2",
+ "faucet-open-v2",
+ "faucet-close-v2",
+ "hammer-v2",
+ "handle-press-side-v2",
+ "handle-press-v2",
+ "handle-pull-side-v2",
+ "handle-pull-v2",
+ "lever-pull-v2",
+ "pick-place-wall-v2",
+ "pick-out-of-hole-v2",
+ "push-back-v2",
+ "pick-place-v2",
+ "plate-slide-v2",
+ "plate-slide-side-v2",
+ "plate-slide-back-v2",
+ "plate-slide-back-side-v2",
+ "peg-insert-side-v2",
+ "peg-unplug-side-v2",
+ "soccer-v2",
+ "stick-push-v2",
+ "stick-pull-v2",
+ "push-wall-v2",
+ "push-v2",
+ "reach-wall-v2",
+ "reach-v2",
+ "shelf-place-v2",
+ "sweep-into-v2",
+ "sweep-v2",
+ "window-open-v2",
+ "window-close-v2",
+ ],
+ test_env_names=[
+ "bin-picking-v2",
+ "box-close-v2",
+ "hand-insert-v2",
+ "door-lock-v2",
+ "door-unlock-v2",
+ ],
+)
+ML45_ARGS_KWARGS = {
+ "train": _get_args_kwargs(ALL_V2_ENVIRONMENTS, ML45_V2["train"]),
+ "test": _get_args_kwargs(ALL_V2_ENVIRONMENTS, ML45_V2["test"]),
+}
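For context, a sketch of how these dicts are typically consumed; the real benchmark classes in `metaworld/__init__.py` add task sampling on top:

```python
# Sketch: iterate a benchmark split and instantiate each environment class.
for name, env_cls in ML10_V2["train"].items():
    env = env_cls()
    print(name, env.action_space.shape)  # every Sawyer env has a 4-dim action
```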
diff --git a/metaworld/envs/mujoco/mujoco_env.py b/metaworld/envs/mujoco/mujoco_env.py
deleted file mode 100644
index 60725666f..000000000
--- a/metaworld/envs/mujoco/mujoco_env.py
+++ /dev/null
@@ -1,10 +0,0 @@
-def _assert_task_is_set(func):
- def inner(*args, **kwargs):
- env = args[0]
- if not env._set_task_called:
- raise RuntimeError(
- "You must call env.set_task before using env." + func.__name__
- )
- return func(*args, **kwargs)
-
- return inner
diff --git a/metaworld/envs/mujoco/sawyer_xyz/__init__.py b/metaworld/envs/mujoco/sawyer_xyz/__init__.py
index e69de29bb..07aa8be38 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/__init__.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/__init__.py
@@ -0,0 +1,5 @@
+from .sawyer_xyz_env import SawyerXYZEnv
+
+__all__ = [
+ "SawyerXYZEnv",
+]
diff --git a/metaworld/envs/mujoco/sawyer_xyz/sawyer_xyz_env.py b/metaworld/envs/mujoco/sawyer_xyz/sawyer_xyz_env.py
index 211770656..a50d1495e 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/sawyer_xyz_env.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/sawyer_xyz_env.py
@@ -1,15 +1,24 @@
+"""Base classes for all the envs."""
+
+from __future__ import annotations
+
import copy
import pickle
+from typing import Any, Callable, Literal, SupportsFloat
import mujoco
import numpy as np
+import numpy.typing as npt
from gymnasium.envs.mujoco import MujocoEnv as mjenv_gym
-from gymnasium.spaces import Box, Discrete
+from gymnasium.spaces import Box, Discrete, Space
from gymnasium.utils import seeding
from gymnasium.utils.ezpickle import EzPickle
+from typing_extensions import TypeAlias
+
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import XYZ, EnvironmentStateDict, ObservationDict, Task
-from metaworld.envs import reward_utils
-from metaworld.envs.mujoco.mujoco_env import _assert_task_is_set
+RenderMode: TypeAlias = "Literal['human', 'rgb_array', 'depth_array']"
class SawyerMocapBase(mjenv_gym):
@@ -26,57 +35,83 @@ class SawyerMocapBase(mjenv_gym):
"render_fps": 80,
}
+ @property
+ def sawyer_observation_space(self) -> Space:
+ raise NotImplementedError
+
def __init__(
self,
- model_name,
- frame_skip=5,
- render_mode=None,
- camera_name=None,
- camera_id=None,
- ):
+ model_name: str,
+ frame_skip: int = 5,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
mjenv_gym.__init__(
self,
model_name,
frame_skip=frame_skip,
observation_space=self.sawyer_observation_space,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
self.reset_mocap_welds()
self.frame_skip = frame_skip
- def get_endeff_pos(self):
+ def get_endeff_pos(self) -> npt.NDArray[Any]:
+ """Returns the position of the end effector."""
return self.data.body("hand").xpos
@property
- def tcp_center(self):
+ def tcp_center(self) -> npt.NDArray[Any]:
"""The COM of the gripper's 2 fingers.
Returns:
- (np.ndarray): 3-element position
+ 3-element position.
"""
right_finger_pos = self.data.site("rightEndEffector")
left_finger_pos = self.data.site("leftEndEffector")
tcp_center = (right_finger_pos.xpos + left_finger_pos.xpos) / 2.0
return tcp_center
- def get_env_state(self):
+ @property
+ def model_name(self) -> str:
+ raise NotImplementedError
+
+ def get_env_state(self) -> tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]:
+ """Get the environment state.
+
+ Returns:
+ A tuple of (qpos, qvel).
+ """
qpos = np.copy(self.data.qpos)
qvel = np.copy(self.data.qvel)
return copy.deepcopy((qpos, qvel))
- def set_env_state(self, state):
+ def set_env_state(
+ self, state: tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]
+ ) -> None:
+ """
+ Set the environment state.
+
+ Args:
+ state: A tuple of (qpos, qvel).
+ """
mocap_pos, mocap_quat = state
self.set_state(mocap_pos, mocap_quat)
- def __getstate__(self):
+ def __getstate__(self) -> EnvironmentStateDict:
+ """Returns the full state of the environment as a dict.
+
+ Returns:
+ A dictionary containing the env state from the `__dict__` method, the model name (path) and the mocap state `(qpos, qvel)`.
+ """
state = self.__dict__.copy()
- # del state['model']
- # del state['data']
return {"state": state, "mjb": self.model_name, "mocap": self.get_env_state()}
- def __setstate__(self, state):
+ def __setstate__(self, state: EnvironmentStateDict) -> None:
+ """Sets the state of the environment from a dict exported through `__getstate__()`.
+
+ Args:
+ state: A dictionary containing the env state from the `__dict__` method, the model name (path) and the mocap state `(qpos, qvel)`.
+ """
self.__dict__ = state["state"]
mjenv_gym.__init__(
self,
@@ -86,45 +121,59 @@ def __setstate__(self, state):
)
self.set_env_state(state["mocap"])
- def reset_mocap_welds(self):
+ def reset_mocap_welds(self) -> None:
"""Resets the mocap welds that we use for actuation."""
if self.model.nmocap > 0 and self.model.eq_data is not None:
for i in range(self.model.eq_data.shape[0]):
if self.model.eq_type[i] == mujoco.mjtEq.mjEQ_WELD:
self.model.eq_data[i] = np.array(
- [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 5.0]
)
class SawyerXYZEnv(SawyerMocapBase, EzPickle):
+ """The base environment for all Sawyer Mujoco envs that use mocap for XYZ control."""
+
_HAND_SPACE = Box(
np.array([-0.525, 0.348, -0.0525]),
np.array([+0.525, 1.025, 0.7]),
dtype=np.float64,
)
- max_path_length = 500
+ """Bounds for hand position."""
- TARGET_RADIUS = 0.05
+ max_path_length: int = 500
+ """The maximum path length for the environment (the task horizon)."""
- current_task = 0
- classes = None
- classes_kwargs = None
- tasks = None
+ TARGET_RADIUS: float = 0.05
+ """Upper bound for distance from the target when checking for task completion."""
+
+ class _Decorators:
+ @classmethod
+ def assert_task_is_set(cls, func: Callable) -> Callable:
+ """Asserts that the task has been set in the environment before proceeding with the function call.
+ To be used as a decorator for SawyerXYZEnv methods."""
+
+ def inner(*args, **kwargs) -> Any:
+ env = args[0]
+ if not env._set_task_called:
+ raise RuntimeError(
+ "You must call env.set_task before using env." + func.__name__
+ )
+ return func(*args, **kwargs)
+
+ return inner
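The decorator reproduces the guard that previously lived in `mujoco_env.py` (deleted above): any wrapped method fails fast until `set_task` has been called. A minimal sketch of the failure mode, using a hypothetical concrete subclass:

```python
# Sketch: guarded methods raise RuntimeError before set_task is called.
env = SomeSawyerEnvV2()  # hypothetical concrete subclass of SawyerXYZEnv
try:
    env.step(env.action_space.sample())  # step() is wrapped by the decorator
except RuntimeError as err:
    print(err)  # "You must call env.set_task before using env.step"
```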
def __init__(
self,
- model_name,
- frame_skip=5,
- hand_low=(-0.2, 0.55, 0.05),
- hand_high=(0.2, 0.75, 0.3),
- mocap_low=None,
- mocap_high=None,
- action_scale=1.0 / 100,
- action_rot_scale=1.0,
- render_mode=None,
- camera_id=None,
- camera_name=None,
- ):
+ frame_skip: int = 5,
+ hand_low: XYZ = (-0.2, 0.55, 0.05),
+ hand_high: XYZ = (0.2, 0.75, 0.3),
+ mocap_low: XYZ | None = None,
+ mocap_high: XYZ | None = None,
+ action_scale: float = 1.0 / 100,
+ action_rot_scale: float = 1.0,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
self.action_scale = action_scale
self.action_rot_scale = action_rot_scale
self.hand_low = np.array(hand_low)
@@ -135,65 +184,60 @@ def __init__(
mocap_high = hand_high
self.mocap_low = np.hstack(mocap_low)
self.mocap_high = np.hstack(mocap_high)
- self.curr_path_length = 0
- self.seeded_rand_vec = False
- self._freeze_rand_vec = True
- self._last_rand_vec = None
- self.num_resets = 0
- self.current_seed = None
+ self.curr_path_length: int = 0
+ self.seeded_rand_vec: bool = False
+ self._freeze_rand_vec: bool = True
+ self._last_rand_vec: npt.NDArray[Any] | None = None
+ self.num_resets: int = 0
+ self.current_seed: int | None = None
+ self.obj_init_pos: npt.NDArray[Any] | None = None
- # We use continuous goal space by default and
- # can discretize the goal space by calling
- # the `discretize_goal_space` method.
- self.discrete_goal_space = None
- self.discrete_goals = []
- self.active_discrete_goal = None
+ # TODO Probably needs to be removed
+ self.discrete_goal_space: Box | None = None
+ self.discrete_goals: list = []
+ self.active_discrete_goal: int | None = None
- self._partially_observable = True
+ self._partially_observable: bool = True
super().__init__(
- model_name,
+ self.model_name,
frame_skip=frame_skip,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
mujoco.mj_forward(
self.model, self.data
) # *** DO NOT REMOVE: EZPICKLE WON'T WORK *** #
- self._did_see_sim_exception = False
- self.init_left_pad = self.get_body_com("leftpad")
- self.init_right_pad = self.get_body_com("rightpad")
+ self._did_see_sim_exception: bool = False
+ self.init_left_pad: npt.NDArray[Any] = self.get_body_com("leftpad")
+ self.init_right_pad: npt.NDArray[Any] = self.get_body_com("rightpad")
- self.action_space = Box(
+ self.action_space = Box( # type: ignore
np.array([-1, -1, -1, -1]),
np.array([+1, +1, +1, +1]),
- dtype=np.float64,
+ dtype=np.float32,
)
+ self._obs_obj_max_len: int = 14
+ self._set_task_called: bool = False
+ self.hand_init_pos: npt.NDArray[Any] | None = None # OVERRIDE ME
+ self._target_pos: npt.NDArray[Any] | None = None # OVERRIDE ME
+ self._random_reset_space: Box | None = None # OVERRIDE ME
+ self.goal_space: Box | None = None # OVERRIDE ME
+ self._last_stable_obs: npt.NDArray[np.float64] | None = None
- # Technically these observation lengths are different between v1 and v2,
- # but we handle that elsewhere and just stick with v2 numbers here
- self._obs_obj_max_len = 14
-
- self._set_task_called = False
-
- self.hand_init_pos = None # OVERRIDE ME
- self._target_pos = None # OVERRIDE ME
- self._random_reset_space = None # OVERRIDE ME
-
- self._last_stable_obs = None
# Note: It is unlikely that the positions and orientations stored
# in this initialization of _prev_obs are correct. That being said, it
# doesn't seem to matter (it will only affect frame-stacking for the
# very first observation)
+ self.init_qpos = np.copy(self.data.qpos)
+ self.init_qvel = np.copy(self.data.qvel)
self._prev_obs = self._get_curr_obs_combined_no_goal()
EzPickle.__init__(
self,
- model_name,
+ self.model_name,
frame_skip,
hand_low,
hand_high,
@@ -203,25 +247,39 @@ def __init__(
action_rot_scale,
)
- def seed(self, seed):
+ def seed(self, seed: int) -> list[int]:
+ """Seeds the environment.
+
+ Args:
+ seed: The seed to use.
+
+ Returns:
+ The seed used inside a 1 element list.
+ """
assert seed is not None
self.np_random, seed = seeding.np_random(seed)
self.action_space.seed(seed)
self.observation_space.seed(seed)
+ assert self.goal_space
self.goal_space.seed(seed)
return [seed]
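Note that `seed()` seeds the action, observation, and goal spaces together, so sampling becomes reproducible across identically seeded copies; a quick sketch with a hypothetical subclass:

```python
# Sketch: two identically seeded envs sample identical actions.
env_a = SomeSawyerEnvV2()  # hypothetical concrete subclass
env_b = SomeSawyerEnvV2()
env_a.seed(0)
env_b.seed(0)
assert (env_a.action_space.sample() == env_b.action_space.sample()).all()
```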
@staticmethod
- def _set_task_inner():
+ def _set_task_inner() -> None:
+ """Helper method to set additional task data. To be overridden by subclasses as appropriate."""
# Doesn't absorb "extra" kwargs, to ensure nothing's missed.
pass
- def set_task(self, task):
+ def set_task(self, task: Task) -> None:
+ """Sets the environment's task.
+
+ Args:
+ task: The task to set.
+ """
self._set_task_called = True
data = pickle.loads(task.data)
assert isinstance(self, data["env_cls"])
del data["env_cls"]
- self._last_rand_vec = data["rand_vec"]
self._freeze_rand_vec = True
self._last_rand_vec = data["rand_vec"]
del data["rand_vec"]
@@ -229,7 +287,13 @@ def set_task(self, task):
del data["partially_observable"]
self._set_task_inner(**data)
- def set_xyz_action(self, action):
+ def set_xyz_action(self, action: npt.NDArray[Any]) -> None:
+ """Adjusts the position of the mocap body from the given action.
+ Moves each body axis in XYZ by the amount described by the action.
+
+ Args:
+ action: The action to apply (in offsets between :math:`[-1, 1]` for each axis in XYZ).
+ """
action = np.clip(action, -1, 1)
pos_delta = action * self.action_scale
new_mocap_pos = self.data.mocap_pos + pos_delta[None]
@@ -241,64 +305,77 @@ def set_xyz_action(self, action):
self.data.mocap_pos = new_mocap_pos
self.data.mocap_quat = np.array([1, 0, 1, 0])
- def discretize_goal_space(self, goals):
- assert False
+ def discretize_goal_space(self, goals: list) -> None:
+ """Discretizes the goal space into a Discrete space.
+ Currently disabled; calling it will stop execution.
+
+ Args:
+ goals: List of goals to discretize.
+ """
+ assert False, "Discretization is not supported at the moment."
assert len(goals) >= 1
self.discrete_goals = goals
# update the goal_space to a Discrete space
self.discrete_goal_space = Discrete(len(self.discrete_goals))
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
+ """Sets the position of the object.
+
+ Args:
+ pos: The position to set as a numpy array of 3 elements (XYZ value).
+ """
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
- def _get_site_pos(self, siteName):
- _id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, siteName)
- return self.data.site_xpos[_id].copy()
+ def _get_site_pos(self, site_name: str) -> npt.NDArray[np.float64]:
+ """Gets the position of a given site.
+
+ Args:
+ site_name: The name of the site to get the position of.
+
+ Returns:
+ Flat, 3 element array indicating site's location.
+ """
+ return self.data.site(site_name).xpos.copy()
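This swaps the manual `mj_name2id` lookup for MuJoCo's named-access API; the two styles below should return the same position (a sketch, assuming an env whose model defines a site called "goal"):

```python
# Sketch: old index-based lookup vs. new named access, same result.
_id = mujoco.mj_name2id(env.model, mujoco.mjtObj.mjOBJ_SITE, "goal")
old_style = env.data.site_xpos[_id].copy()
new_style = env.data.site("goal").xpos.copy()
assert (old_style == new_style).all()
```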
- def _set_pos_site(self, name, pos):
- """Sets the position of the site corresponding to `name`.
+ def _set_pos_site(self, name: str, pos: npt.NDArray[Any]) -> None:
+ """Sets the position of a given site.
Args:
- name (str): The site's name
- pos (np.ndarray): Flat, 3 element array indicating site's location
+ name: The site's name
+ pos: Flat, 3 element array indicating site's location
"""
assert isinstance(pos, np.ndarray)
assert pos.ndim == 1
- _id = mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, name)
- self.data.site_xpos[_id] = pos[:3]
+ self.data.site(name).xpos = pos[:3]
@property
- def _target_site_config(self):
- """Retrieves site name(s) and position(s) corresponding to env targets.
-
- :rtype: list of (str, np.ndarray)
- """
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ """Retrieves site name(s) and position(s) corresponding to env targets."""
+ assert self._target_pos is not None
return [("goal", self._target_pos)]
@property
- def touching_main_object(self):
+ def touching_main_object(self) -> bool:
"""Calls `touching_object` for the ID of the env's main object.
Returns:
- (bool) whether the gripper is touching the object
-
+ Whether the gripper is touching the object
"""
- return self.touching_object(self._get_id_main_object)
+ return self.touching_object(self._get_id_main_object())
- def touching_object(self, object_geom_id):
+ def touching_object(self, object_geom_id: int) -> bool:
"""Determines whether the gripper is touching the object with given id.
Args:
- object_geom_id (int): the ID of the object in question
+ object_geom_id: the ID of the object in question
Returns:
- (bool): whether the gripper is touching the object
-
+ Whether the gripper is touching the object
"""
leftpad_geom_id = self.data.geom("leftpad_geom").id
@@ -306,7 +383,7 @@ def touching_object(self, object_geom_id):
leftpad_object_contacts = [
x
- for x in self.unwrapped.data.contact
+ for x in self.data.contact
if (
leftpad_geom_id in (x.geom1, x.geom2)
and object_geom_id in (x.geom1, x.geom2)
@@ -315,7 +392,7 @@ def touching_object(self, object_geom_id):
rightpad_object_contacts = [
x
- for x in self.unwrapped.data.contact
+ for x in self.data.contact
if (
rightpad_geom_id in (x.geom1, x.geom2)
and object_geom_id in (x.geom1, x.geom2)
@@ -323,64 +400,55 @@ def touching_object(self, object_geom_id):
]
leftpad_object_contact_force = sum(
- self.unwrapped.data.efc_force[x.efc_address]
- for x in leftpad_object_contacts
+ self.data.efc_force[x.efc_address] for x in leftpad_object_contacts
)
rightpad_object_contact_force = sum(
- self.unwrapped.data.efc_force[x.efc_address]
- for x in rightpad_object_contacts
+ self.data.efc_force[x.efc_address] for x in rightpad_object_contacts
)
return 0 < leftpad_object_contact_force and 0 < rightpad_object_contact_force
- @property
- def _get_id_main_object(self):
- return self.data.geom(
- "objGeom"
- ).id # [mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_GEOM, 'objGeom')]
+ def _get_id_main_object(self) -> int:
+ return self.data.geom("objGeom").id
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
"""Retrieves object position(s) from mujoco properties or instance vars.
Returns:
- np.ndarray: Flat array (usually 3 elements) representing the
- object(s)' position(s)
+ Flat array (usually 3 elements) representing the object(s)' position(s)
"""
# Throw error rather than making this an @abc.abstractmethod so that
# V1 environments don't have to implement it
raise NotImplementedError
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
"""Retrieves object quaternion(s) from mujoco properties.
Returns:
- np.ndarray: Flat array (usually 4 elements) representing the
- object(s)' quaternion(s)
-
+ Flat array (usually 4 elements) representing the object(s)' quaternion(s)
"""
# Throw error rather than making this an @abc.abstractmethod so that
# V1 environments don't have to implement it
raise NotImplementedError
- def _get_pos_goal(self):
+ def _get_pos_goal(self) -> npt.NDArray[Any]:
"""Retrieves goal position from mujoco properties or instance vars.
Returns:
- np.ndarray: Flat array (3 elements) representing the goal position
+ Flat array (3 elements) representing the goal position
"""
assert isinstance(self._target_pos, np.ndarray)
assert self._target_pos.ndim == 1
return self._target_pos
- def _get_curr_obs_combined_no_goal(self):
+ def _get_curr_obs_combined_no_goal(self) -> npt.NDArray[np.float64]:
"""Combines the end effector's {pos, closed amount} and the object(s)' {pos, quat} into a single flat observation.
Note: The goal's position is *not* included in this.
Returns:
- np.ndarray: The flat observation array (18 elements)
-
+ The flat observation array (18 elements)
"""
pos_hand = self.get_endeff_pos()
@@ -412,11 +480,11 @@ def _get_curr_obs_combined_no_goal(self):
)
return np.hstack((pos_hand, gripper_distance_apart, obs_obj_padded))
- def _get_obs(self):
+ def _get_obs(self) -> npt.NDArray[np.float64]:
"""Frame stacks `_get_curr_obs_combined_no_goal()` and concatenates the goal position to form a single flat observation.
Returns:
- np.ndarray: The flat observation array (39 elements)
+ The flat observation array (39 elements)
"""
# do frame stacking
pos_goal = self._get_pos_goal()
@@ -428,7 +496,7 @@ def _get_obs(self):
self._prev_obs = curr_obs
return obs
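Putting the pieces together, each observation is `[current (18), previous (18), goal (3)]`; a sketch of the layout for reference:

```python
# Sketch of the 39-element observation produced by _get_obs():
#   obs[0:3]    end-effector XYZ position
#   obs[3]      normalized gripper openness
#   obs[4:18]   padded object pos/quat block (_obs_obj_max_len == 14)
#   obs[18:36]  previous frame's 18-element observation (frame stacking)
#   obs[36:39]  goal position (zeros while the goal is hidden)
obs = env._get_obs()  # hypothetical env instance
assert obs.shape == (39,)
```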
- def _get_obs_dict(self):
+ def _get_obs_dict(self) -> ObservationDict:
obs = self._get_obs()
return dict(
state_observation=obs,
@@ -437,12 +505,19 @@ def _get_obs_dict(self):
)
@property
- def sawyer_observation_space(self):
+ def sawyer_observation_space(self) -> Box:
obs_obj_max_len = 14
obj_low = np.full(obs_obj_max_len, -np.inf, dtype=np.float64)
obj_high = np.full(obs_obj_max_len, +np.inf, dtype=np.float64)
- goal_low = np.zeros(3) if self._partially_observable else self.goal_space.low
- goal_high = np.zeros(3) if self._partially_observable else self.goal_space.high
+ if self._partially_observable:
+ goal_low = np.zeros(3)
+ goal_high = np.zeros(3)
+ else:
+ assert (
+ self.goal_space is not None
+ ), "The goal space must be defined to use full observability"
+ goal_low = self.goal_space.low
+ goal_high = self.goal_space.high
gripper_low = -1.0
gripper_high = +1.0
return Box(
@@ -471,8 +546,18 @@ def sawyer_observation_space(self):
dtype=np.float64,
)
- @_assert_task_is_set
- def step(self, action):
+ @_Decorators.assert_task_is_set
+ def step(
+ self, action: npt.NDArray[np.float32]
+ ) -> tuple[npt.NDArray[np.float64], SupportsFloat, bool, bool, dict[str, Any]]:
+ """Step the environment.
+
+ Args:
+ action: The action to take. Must be a 4 element array of floats.
+
+ Returns:
+ The (next_obs, reward, terminated, truncated, info) tuple.
+ """
assert len(action) == 4, f"Actions should be size 4, got {len(action)}"
self.set_xyz_action(action[:3])
if self.curr_path_length >= self.max_path_length:
@@ -486,6 +571,7 @@ def step(self, action):
self._set_pos_site(*site)
if self._did_see_sim_exception:
+ assert self._last_stable_obs is not None
return (
self._last_stable_obs, # observation just before going unstable
0.0, # reward (penalize for causing instability)
@@ -510,6 +596,7 @@ def step(self, action):
a_min=self.sawyer_observation_space.low,
dtype=np.float64,
)
+ assert isinstance(self._last_stable_obs, np.ndarray)
reward, info = self.evaluate_state(self._last_stable_obs, action)
# step will never return a terminate==True if there is a success
# but we can return truncate=True if the current path length == max path length
@@ -524,30 +611,52 @@ def step(self, action):
info,
)
- def evaluate_state(self, obs, action):
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
"""Does the heavy-lifting for `step()` -- namely, calculating reward and populating the `info` dict with training metrics.
Returns:
- float: Reward between 0 and 10
- dict: Dictionary which contains useful metrics (success,
+ Tuple of reward between 0 and 10 and a dictionary which contains useful metrics (success,
near_object, grasp_success, grasp_reward, in_place_reward,
obj_to_target, unscaled_reward)
-
"""
# Throw error rather than making this an @abc.abstractmethod so that
# V1 environments don't have to implement it
raise NotImplementedError
- def reset(self, seed=None, options=None):
+ def reset_model(self) -> npt.NDArray[np.float64]:
+ qpos = self.init_qpos
+ qvel = self.init_qvel
+ self.set_state(qpos, qvel)
+ return self._get_obs()
+
+ def reset(
+ self, seed: int | None = None, options: dict[str, Any] | None = None
+ ) -> tuple[npt.NDArray[np.float64], dict[str, Any]]:
+ """Resets the environment.
+
+ Args:
+ seed: The seed to use. Ignored, use `seed()` instead.
+ options: Additional options to pass to the environment. Ignored.
+
+ Returns:
+ The `(obs, info)` tuple.
+ """
self.curr_path_length = 0
+ self.reset_model()
obs, info = super().reset()
- mujoco.mj_forward(self.model, self.data)
self._prev_obs = obs[:18].copy()
obs[18:36] = self._prev_obs
- obs = np.float64(obs)
+ obs = obs.astype(np.float64)
return obs, info
- def _reset_hand(self, steps=50):
+ def _reset_hand(self, steps: int = 50) -> None:
+ """Resets the hand position.
+
+ Args:
+ steps: The number of steps to take to reset the hand.
+ """
mocap_id = self.model.body_mocapid[self.data.body("mocap").id]
for _ in range(steps):
self.data.mocap_pos[mocap_id][:] = self.hand_init_pos
@@ -555,13 +664,13 @@ def _reset_hand(self, steps=50):
self.do_simulation([-1, 1], self.frame_skip)
self.init_tcp = self.tcp_center
- self.init_tcp = self.tcp_center
-
- def _get_state_rand_vec(self):
+ def _get_state_rand_vec(self) -> npt.NDArray[np.float64]:
+ """Gets or generates a random vector for the hand position at reset."""
if self._freeze_rand_vec:
assert self._last_rand_vec is not None
return self._last_rand_vec
elif self.seeded_rand_vec:
+ assert self._random_reset_space is not None
rand_vec = self.np_random.uniform(
self._random_reset_space.low,
self._random_reset_space.high,
@@ -570,7 +679,8 @@ def _get_state_rand_vec(self):
self._last_rand_vec = rand_vec
return rand_vec
else:
- rand_vec = np.random.uniform(
+ assert self._random_reset_space is not None
+ rand_vec: npt.NDArray[np.float64] = np.random.uniform( # type: ignore
self._random_reset_space.low,
self._random_reset_space.high,
size=self._random_reset_space.low.size,
@@ -580,16 +690,16 @@ def _get_state_rand_vec(self):
def _gripper_caging_reward(
self,
- action,
- obj_pos,
- obj_radius,
- pad_success_thresh,
- object_reach_radius,
- xz_thresh,
- desired_gripper_effort=1.0,
- high_density=False,
- medium_density=False,
- ):
+ action: npt.NDArray[np.float32],
+ obj_pos: npt.NDArray[Any],
+ obj_radius: float,
+ pad_success_thresh: float,
+ object_reach_radius: float,
+ xz_thresh: float,
+ desired_gripper_effort: float = 1.0,
+ high_density: bool = False,
+ medium_density: bool = False,
+ ) -> float:
"""Reward for agent grasping obj.
Args:
@@ -607,7 +717,14 @@ def _gripper_caging_reward(
desired_gripper_effort(float): desired gripper effort, defaults to 1.0.
high_density(bool): flag for high-density. Cannot be used with medium-density.
medium_density(bool): flag for medium-density. Cannot be used with high-density.
+
+ Returns:
+ The reward value.
"""
+ assert (
+ self.obj_init_pos is not None
+ ), "`obj_init_pos` must be initialized before calling this function."
+
if high_density and medium_density:
raise ValueError("Can only be either high_density or medium_density")
# MARK: Left-right gripper information for caging reward----------------
@@ -686,7 +803,7 @@ def _gripper_caging_reward(
)
# MARK: Combine components----------------------------------------------
- caging = reward_utils.hamacher_product(caging_y, caging_xz)
+ caging = reward_utils.hamacher_product(caging_y, float(caging_xz))
gripping = gripper_closed if caging > 0.97 else 0.0
caging_and_gripping = reward_utils.hamacher_product(caging, gripping)
@@ -706,6 +823,6 @@ def _gripper_caging_reward(
margin=reach_margin,
sigmoid="long_tail",
)
- caging_and_gripping = (caging_and_gripping + reach) / 2
+ caging_and_gripping = (caging_and_gripping + float(reach)) / 2
return caging_and_gripping
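`hamacher_product` combines two `[0, 1]` reward components with the Hamacher t-norm, a smooth AND. A hedged sketch of its standard form (the project's actual implementation lives in `reward_utils`):

```python
def hamacher_product(a: float, b: float) -> float:
    """Hamacher t-norm: smooth conjunction of two values in [0, 1] (sketch)."""
    denom = a + b - a * b
    return (a * b) / denom if denom > 0 else 0.0

# Zero if either component is zero; one only when both are one.
assert hamacher_product(1.0, 1.0) == 1.0
assert hamacher_product(0.5, 0.0) == 0.0
```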
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_assembly_peg.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_assembly_peg.py
index 070045073..fc45d7cb1 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_assembly_peg.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_assembly_peg.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerNutAssemblyEnv(SawyerXYZEnv):
@@ -41,14 +38,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_assembly_peg.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, placingDist, _, success = self.compute_reward(
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_basketball.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_basketball.py
index c472aebd0..ab3563c16 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_basketball.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_basketball.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerBasketballEnv(SawyerXYZEnv):
@@ -39,17 +36,19 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
self.goal_space = Box(
np.array(goal_low) + np.array([0, -0.05001, 0.1000]),
np.array(goal_high) + np.array([0, -0.05000, 0.1001]),
+ dtype=np.float64,
)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_basketball.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pickRew, placingDist = self.compute_reward(action, ob)
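
The `@_assert_task_is_set` to `@SawyerXYZEnv._Decorators.assert_task_is_set` swap repeats across every v1 environment in this diff. A sketch of how such a nested decorator class can be structured; the `_set_task_called` flag and the error message are illustrative assumptions, not a copy of the actual implementation:

```python
import functools


class _Decorators:
    @classmethod
    def assert_task_is_set(cls, func):
        """Fail fast when a decorated method runs before set_task()."""

        @functools.wraps(func)
        def inner(self, *args, **kwargs):
            # Assumed flag name; the real env tracks task state internally.
            if not getattr(self, "_set_task_called", False):
                raise RuntimeError(
                    f"You must call env.set_task before env.{func.__name__}"
                )
            return func(self, *args, **kwargs)

        return inner
```

Hanging the decorator off the base class means each environment file only needs the `SawyerXYZEnv` import, which is exactly what the shortened import hunks reflect.
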
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_bin_picking.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_bin_picking.py
index e3f06a347..f2e8ad9f6 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_bin_picking.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_bin_picking.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerBinPickingEnv(SawyerXYZEnv):
@@ -40,23 +37,25 @@ def __init__(self):
self.hand_and_obj_space = Box(
np.hstack((self.hand_low, obj_low)),
np.hstack((self.hand_high, obj_high)),
+ dtype=np.float64,
)
self.goal_and_obj_space = Box(
np.hstack((goal_low[:2], obj_low[:2])),
np.hstack((goal_high[:2], obj_high[:2])),
+ dtype=np.float64,
)
- self.goal_space = Box(goal_low, goal_high)
+ self.goal_space = Box(goal_low, goal_high, dtype=np.float64)
self._random_reset_space = Box(
- low=np.array([-0.22, -0.02]), high=np.array([0.6, 0.8])
+ low=np.array([-0.22, -0.02]), high=np.array([0.6, 0.8]), dtype=np.float64
)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_bin_picking.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, placingDist = self.compute_reward(action, ob)
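
The recurring `dtype=np.float64` additions all address the same issue: gymnasium's `Box` defaults to `float32`, so the `float64` NumPy bounds would be silently down-cast (with a "Box bound precision lowered" warning) and the space dtype would disagree with the environments' `float64` observations. A small standalone illustration:

```python
import numpy as np
from gymnasium.spaces import Box

low = np.array([-0.22, -0.02])  # float64 by default
high = np.array([0.6, 0.8])

implicit = Box(low=low, high=high)                    # dtype falls back to float32
explicit = Box(low=low, high=high, dtype=np.float64)  # matches the bounds

print(implicit.dtype, explicit.dtype)  # float32 float64
print(explicit.sample().dtype)         # float64: samples follow the space dtype
```
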
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_box_close.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_box_close.py
index 4c47c40b6..3092013cd 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_box_close.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_box_close.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerBoxCloseEnv(SawyerXYZEnv):
@@ -38,15 +35,16 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_box.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, placingDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press.py
index 5c1561894..2040f7339 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerButtonPressEnv(SawyerXYZEnv):
@@ -32,16 +29,15 @@ def __init__(self):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_button_press.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pressDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_topdown.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_topdown.py
index bab9f7820..b93afe8c8 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_topdown.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_topdown.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerButtonPressTopdownEnv(SawyerXYZEnv):
@@ -33,16 +30,15 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_button_press_topdown.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pressDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_topdown_wall.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_topdown_wall.py
index c6465db14..015c1a0bd 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_topdown_wall.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_topdown_wall.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerButtonPressTopdownWallEnv(SawyerXYZEnv):
@@ -33,16 +30,15 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_button_press_topdown_wall.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pressDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_wall.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_wall.py
index 04a26d55e..341c6881a 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_wall.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_button_press_wall.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerButtonPressWallEnv(SawyerXYZEnv):
@@ -33,17 +30,16 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_button_press_wall.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pressDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_button.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_button.py
index fe555f817..1ad1f9ed3 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_button.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_button.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerCoffeeButtonEnv(SawyerXYZEnv):
@@ -36,16 +33,15 @@ def __init__(self):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_coffee.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pushDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_pull.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_pull.py
index b7223aa97..24b13dd6d 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_pull.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_pull.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerCoffeePullEnv(SawyerXYZEnv):
@@ -36,14 +33,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_coffee.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_push.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_push.py
index 30e130441..9b7872ba1 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_push.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_coffee_push.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerCoffeePushEnv(SawyerXYZEnv):
@@ -36,14 +33,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_coffee.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pushDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_dial_turn.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_dial_turn.py
index 40efe8897..acd469431 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_dial_turn.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_dial_turn.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerDialTurnEnv(SawyerXYZEnv):
@@ -32,16 +29,15 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_dial.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_disassemble_peg.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_disassemble_peg.py
index f98dddc3d..a4572bf98 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_disassemble_peg.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_disassemble_peg.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerNutDisassembleEnv(SawyerXYZEnv):
@@ -39,14 +36,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_assembly_peg.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, placingDist, success = self.compute_reward(
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door.py
index 73f146539..12bbfd89b 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerDoorEnv(SawyerXYZEnv):
@@ -42,10 +39,9 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.door_angle_idx = self.model.get_joint_qpos_addr("doorjoint")
@@ -53,7 +49,7 @@ def __init__(self):
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_door_pull.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door_lock.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door_lock.py
index d019dc601..d4cfeeab3 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door_lock.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door_lock.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerDoorLockEnv(SawyerXYZEnv):
@@ -33,16 +30,15 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_door_lock.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door_unlock.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door_unlock.py
index 568aeaea8..15509331d 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door_unlock.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_door_unlock.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerDoorUnlockEnv(SawyerXYZEnv):
@@ -32,16 +29,15 @@ def __init__(self):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_door_lock.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_drawer_close.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_drawer_close.py
index 7095b8a02..19adb16d1 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_drawer_close.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_drawer_close.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerDrawerCloseEnv(SawyerXYZEnv):
@@ -38,16 +35,15 @@ def __init__(self):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_drawer.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_drawer_open.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_drawer_open.py
index b9142b5b6..5af7f8f52 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_drawer_open.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_drawer_open.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerDrawerOpenEnv(SawyerXYZEnv):
@@ -38,16 +35,15 @@ def __init__(self):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_drawer.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_faucet_close.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_faucet_close.py
index d736057e8..c3f9ccbb1 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_faucet_close.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_faucet_close.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerFaucetCloseEnv(SawyerXYZEnv):
@@ -33,16 +30,15 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_faucet.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_faucet_open.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_faucet_open.py
index e5cd2926a..539413c29 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_faucet_open.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_faucet_open.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerFaucetOpenEnv(SawyerXYZEnv):
@@ -32,16 +29,15 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_faucet.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_hammer.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_hammer.py
index cfd1df68b..3d55635cc 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_hammer.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_hammer.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerHammerEnv(SawyerXYZEnv):
@@ -34,14 +31,16 @@ def __init__(self):
self.liftThresh = liftThresh
- self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self._random_reset_space = Box(
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
+ )
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_hammer.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, _, screwDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_hand_insert.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_hand_insert.py
index fbeadb798..244f88ec9 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_hand_insert.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_hand_insert.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerHandInsertEnv(SawyerXYZEnv):
@@ -36,14 +33,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_table_with_hole.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_press.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_press.py
index b8fe329ae..91f246b7d 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_press.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_press.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerHandlePressEnv(SawyerXYZEnv):
@@ -34,16 +31,15 @@ def __init__(self):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_handle_press.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pressDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_press_side.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_press_side.py
index 126ce4850..1cb5b9851 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_press_side.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_press_side.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerHandlePressSideEnv(SawyerXYZEnv):
@@ -35,16 +32,15 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_handle_press_sideway.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pressDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_pull.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_pull.py
index 6ccf11311..85a700a1d 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_pull.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_pull.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerHandlePullEnv(SawyerXYZEnv):
@@ -35,16 +32,15 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_handle_press.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pressDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_pull_side.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_pull_side.py
index b4d0f068d..2e98c1b6d 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_pull_side.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_handle_pull_side.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerHandlePullSideEnv(SawyerXYZEnv):
@@ -35,16 +32,15 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_handle_press_sideway.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pressDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_lever_pull.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_lever_pull.py
index 520cd6535..ce36f56f2 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_lever_pull.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_lever_pull.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerLeverPullEnv(SawyerXYZEnv):
@@ -33,16 +30,15 @@ def __init__(self):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_lever_pull.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_peg_insertion_side.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_peg_insertion_side.py
index 0e01770a1..8fdd46cab 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_peg_insertion_side.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_peg_insertion_side.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerPegInsertionSideEnv(SawyerXYZEnv):
@@ -44,14 +41,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_peg_insertion_side.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, placingDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_peg_unplug_side.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_peg_unplug_side.py
index bbf3ce824..c12fed477 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_peg_unplug_side.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_peg_unplug_side.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerPegUnplugSideEnv(SawyerXYZEnv):
@@ -35,16 +32,15 @@ def __init__(self):
self.liftThresh = liftThresh
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_peg_unplug_side.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, placingDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_pick_out_of_hole.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_pick_out_of_hole.py
index a9f822e21..50068d7ef 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_pick_out_of_hole.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_pick_out_of_hole.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerPickOutOfHoleEnv(SawyerXYZEnv):
@@ -39,14 +36,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_pick_out_of_hole.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pickRew, placingDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide.py
index b612471ce..e4ba3cd4c 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerPlateSlideEnv(SawyerXYZEnv):
@@ -36,14 +33,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_plate_slide.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_back.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_back.py
index b474ad4ab..09e3a8de3 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_back.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_back.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerPlateSlideBackEnv(SawyerXYZEnv):
@@ -36,14 +33,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_plate_slide.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_back_side.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_back_side.py
index f72fa61b0..de4bbd251 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_back_side.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_back_side.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerPlateSlideBackSideEnv(SawyerXYZEnv):
@@ -36,14 +33,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_plate_slide_sideway.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_side.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_side.py
index a25a9d881..06e533336 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_side.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_plate_slide_side.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerPlateSlideSideEnv(SawyerXYZEnv):
@@ -36,14 +33,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_plate_slide_sideway.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_push_back.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_push_back.py
index ec018dc53..b39bca763 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_push_back.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_push_back.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerPushBackEnv(SawyerXYZEnv):
@@ -36,14 +33,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_push_back.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pushDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_reach_push_pick_place.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_reach_push_pick_place.py
index 4d6eca798..0dbceec9d 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_reach_push_pick_place.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_reach_push_pick_place.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerReachPushPickPlaceEnv(SawyerXYZEnv):
@@ -42,8 +39,9 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.num_resets = 0
@@ -67,7 +65,7 @@ def _set_task_inner(self, *, task_type, **kwargs):
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_reach_push_pick_and_place.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
(
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_reach_push_pick_place_wall.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_reach_push_pick_place_wall.py
index 88bbf802f..9195cd5f0 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_reach_push_pick_place_wall.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_reach_push_pick_place_wall.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerReachPushPickPlaceWallEnv(SawyerXYZEnv):
@@ -42,8 +39,9 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.num_resets = 0
@@ -66,7 +64,7 @@ def _set_task_inner(self, *, task_type, **kwargs):
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_reach_push_pick_and_place_wall.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
(
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_shelf_place.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_shelf_place.py
index 0d17087f5..838ce82d9 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_shelf_place.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_shelf_place.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerShelfPlaceEnv(SawyerXYZEnv):
@@ -39,10 +36,12 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
self.goal_space = Box(
np.array(goal_low) + np.array([0.0, 0.0, 0.299]),
np.array(goal_high) + np.array([0.0, 0.0, 0.301]),
+ dtype=np.float64,
)
self.num_resets = 0
@@ -51,7 +50,7 @@ def __init__(self):
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_shelf_placing.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, placingDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_soccer.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_soccer.py
index f5d879071..e92fc1c33 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_soccer.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_soccer.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerSoccerEnv(SawyerXYZEnv):
@@ -36,14 +33,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_soccer.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pushDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_stick_pull.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_stick_pull.py
index cdbe37df1..9ff2c51fc 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_stick_pull.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_stick_pull.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerStickPullEnv(SawyerXYZEnv):
@@ -37,18 +34,19 @@ def __init__(self):
# Fix object init position.
self.obj_init_pos = np.array([0.2, 0.69, 0.04])
self.obj_init_qpos = np.array([0.0, 0.09])
- self.obj_space = Box(np.array(obj_low), np.array(obj_high))
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.obj_space = Box(np.array(obj_low), np.array(obj_high), dtype=np.float64)
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_stick_obj.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, pullDist, _ = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_stick_push.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_stick_push.py
index 309cc7a92..7730560f9 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_stick_push.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_stick_push.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerStickPushEnv(SawyerXYZEnv):
@@ -35,18 +32,19 @@ def __init__(self):
self.liftThresh = liftThresh # For now, fix the object initial position.
self.obj_init_pos = np.array([0.2, 0.6, 0.04])
self.obj_init_qpos = np.array([0.0, 0.0])
- self.obj_space = Box(np.array(obj_low), np.array(obj_high))
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.obj_space = Box(np.array(obj_low), np.array(obj_high), dtype=np.float64)
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_stick_obj.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, _, reachDist, pickRew, _, pushDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_sweep.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_sweep.py
index a54f5dc49..bb04df521 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_sweep.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_sweep.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerSweepEnv(SawyerXYZEnv):
@@ -37,16 +34,15 @@ def __init__(self):
self.init_puck_z = init_puck_z
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_sweep.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pushDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_sweep_into_goal.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_sweep_into_goal.py
index cd1da5af4..5f85bb547 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_sweep_into_goal.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_sweep_into_goal.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerSweepIntoGoalEnv(SawyerXYZEnv):
@@ -36,14 +33,15 @@ def __init__(self):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_table_with_hole.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pushDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_window_close.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_window_close.py
index fce7bed9d..6fedea773 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_window_close.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_window_close.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerWindowCloseEnv(SawyerXYZEnv):
@@ -38,16 +35,15 @@ def __init__(self):
self.liftThresh = liftThresh
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_window_horizontal.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pickrew, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_window_open.py b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_window_open.py
index 484d5fe89..a4f6b5722 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_window_open.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v1/sawyer_window_open.py
@@ -2,10 +2,7 @@
from gymnasium.spaces import Box
from metaworld.envs.asset_path_utils import full_v1_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
class SawyerWindowOpenEnv(SawyerXYZEnv):
@@ -43,16 +40,15 @@ def __init__(self):
self.liftThresh = liftThresh
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
def model_name(self):
return full_v1_path_for("sawyer_xyz/sawyer_window_horizontal.xml")
- @_assert_task_is_set
+ @SawyerXYZEnv._Decorators.assert_task_is_set
def step(self, action):
ob = super().step(action)
reward, reachDist, pickrew, pullDist = self.compute_reward(action, ob)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_assembly_peg_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_assembly_peg_v2.py
index 3a5c2ce29..2aef7b79d 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_assembly_peg_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_assembly_peg_v2.py
@@ -1,19 +1,24 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils.reward_utils import tolerance
+from metaworld.types import InitConfigDict, ObservationDict
class SawyerNutAssemblyEnvV2(SawyerXYZEnv):
- WRENCH_HANDLE_LENGTH = 0.02
+ WRENCH_HANDLE_LENGTH: float = 0.02
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (0, 0.6, 0.02)
@@ -22,15 +27,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.1, 0.85, 0.1)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0, 0.6, 0.02], dtype=np.float32),
"hand_init_pos": np.array((0, 0.6, 0.2), dtype=np.float32),
@@ -44,15 +46,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_assembly_peg.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
reward_grab,
@@ -74,27 +79,28 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ assert isinstance(
+ self._target_pos, np.ndarray
+ ), "`reset_model()` must be called before `_target_site_config` is accessed."
return [("pegTop", self._target_pos)]
- def _get_id_main_object(self):
+ def _get_id_main_object(self) -> int:
"""TODO: Reggie"""
- return self.unwrapped.model.geom_name2id("WrenchHandle")
+ return self.model.geom_name2id("WrenchHandle")
- def _get_pos_objects(self):
- return self.data.site_xpos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, "RoundNut-8")
- ]
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
+ return self.data.site("RoundNut-8").xpos
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("RoundNut").xquat
- def _get_obs_dict(self):
+ def _get_obs_dict(self) -> ObservationDict:
obs_dict = super()._get_obs_dict()
obs_dict["state_achieved_goal"] = self.get_body_com("RoundNut")
return obs_dict
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
goal_pos = self._get_state_rand_vec()
while np.linalg.norm(goal_pos[:2] - goal_pos[-3:-1]) < 0.1:
@@ -103,31 +109,29 @@ def reset_model(self):
self._target_pos = goal_pos[-3:]
peg_pos = self._target_pos - np.array([0.0, 0.0, 0.05])
self._set_obj_xyz(self.obj_init_pos)
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "peg")
- ] = peg_pos
- self.model.site_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, "pegTop")
- ] = self._target_pos
+ self.model.body("peg").pos = peg_pos
+ self.model.site("pegTop").pos = self._target_pos
return self._get_obs()
@staticmethod
- def _reward_quat(obs):
+ def _reward_quat(obs: npt.NDArray[np.float64]) -> float:
# Ideal laid-down wrench has quat [.707, 0, 0, .707]
# Rather than deal with an angle between quaternions, just approximate:
ideal = np.array([0.707, 0, 0, 0.707])
- error = np.linalg.norm(obs[7:11] - ideal)
+ error = float(np.linalg.norm(obs[7:11] - ideal))
return max(1.0 - error / 0.4, 0.0)
@staticmethod
- def _reward_pos(wrench_center, target_pos):
+ def _reward_pos(
+ wrench_center: npt.NDArray[Any], target_pos: npt.NDArray[Any]
+ ) -> tuple[float, bool]:
pos_error = target_pos - wrench_center
radius = np.linalg.norm(pos_error[:2])
aligned = radius < 0.02
hooked = pos_error[2] > 0.0
- success = aligned and hooked
+ success = bool(aligned and hooked)
# Target height is a 3D funnel centered on the peg.
# use the success flag to widen the bottleneck once the agent
@@ -144,8 +148,8 @@ def _reward_pos(wrench_center, target_pos):
a = 0.1 # Relative importance of just *trying* to lift the wrench
b = 0.9 # Relative importance of placing the wrench on the peg
lifted = wrench_center[2] > 0.02 or radius < threshold
- in_place = a * float(lifted) + b * reward_utils.tolerance(
- np.linalg.norm(pos_error * scale),
+ in_place = a * float(lifted) + b * tolerance(
+ float(np.linalg.norm(pos_error * scale)),
bounds=(0, 0.02),
margin=0.4,
sigmoid="long_tail",
@@ -153,7 +157,13 @@ def _reward_pos(wrench_center, target_pos):
return in_place, success
- def compute_reward(self, actions, obs):
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, bool]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
+
hand = obs[:3]
wrench = obs[4:7]
wrench_center = self._get_site_pos("RoundNut")
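The hunks above trade `mujoco.mj_name2id` plus raw-array indexing for the named-access views on `MjModel`/`MjData`; both forms address the same underlying arrays. A runnable sketch with a toy model (the XML is illustrative only):

import mujoco

XML = """
<mujoco>
  <worldbody>
    <body name="peg" pos="0 0 0">
      <geom type="sphere" size="0.01"/>
      <site name="RoundNut-8"/>
    </body>
  </worldbody>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(XML)
data = mujoco.MjData(model)
mujoco.mj_forward(model, data)

# Old style: resolve an integer id, then index the flat array.
body_id = mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_BODY, "peg")
model.body_pos[body_id] = [0.0, 0.0, 0.05]

# New style: the named view bundles the lookup and the indexing.
model.body("peg").pos = [0.0, 0.0, 0.05]
print(data.site("RoundNut-8").xpos)  # same memory as data.site_xpos[site_id]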
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_basketball_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_basketball_v2.py
index 05684e186..a934288c7 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_basketball_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_basketball_v2.py
@@ -1,20 +1,25 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerBasketballEnvV2(SawyerXYZEnv):
- PAD_SUCCESS_MARGIN = 0.06
- TARGET_RADIUS = 0.08
+ PAD_SUCCESS_MARGIN: float = 0.06
+ TARGET_RADIUS: float = 0.08
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.6, 0.0299)
@@ -23,15 +28,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.1, 0.9 + 1e-7, 0.0)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0, 0.6, 0.03], dtype=np.float32),
"hand_init_pos": np.array((0, 0.6, 0.2), dtype=np.float32),
@@ -44,18 +46,22 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
self.goal_space = Box(
np.array(goal_low) + np.array([0, -0.083, 0.2499]),
np.array(goal_high) + np.array([0, -0.083, 0.2501]),
+ dtype=np.float64,
)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_basketball.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
reward,
@@ -66,6 +72,7 @@ def evaluate_state(self, obs, action):
in_place_reward,
) = self.compute_reward(action, obs)
+ assert self.obj_init_pos is not None
info = {
"success": float(obj_to_target <= self.TARGET_RADIUS),
"near_object": float(tcp_to_obj <= 0.05),
@@ -80,16 +87,16 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("objGeom")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("objGeom")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("bsktball")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("bsktball").xquat
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.prev_obs = self._get_curr_obs_combined_no_goal()
goal_pos = self._get_state_rand_vec()
@@ -97,17 +104,21 @@ def reset_model(self):
while np.linalg.norm(goal_pos[:2] - basket_pos[:2]) < 0.15:
goal_pos = self._get_state_rand_vec()
basket_pos = goal_pos[3:]
- self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "basket_goal")
- ] = basket_pos
- self._target_pos = self.data.site_xpos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, "goal")
- ]
+ assert self.obj_init_pos is not None
+ self.obj_init_pos = np.concatenate([goal_pos[:2], [self.obj_init_pos[-1]]])
+ self.model.body("basket_goal").pos = basket_pos
+ self._target_pos = self.data.site("goal").xpos
self._set_obj_xyz(self.obj_init_pos)
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None and self.obj_init_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
+
obj = obs[4:7]
# Force target to be slightly above basketball hoop
target = self._target_pos.copy()
@@ -116,7 +127,7 @@ def compute_reward(self, action, obs):
# Emphasize Z error
scale = np.array([1.0, 1.0, 2.0])
target_to_obj = (obj - target) * scale
- target_to_obj = np.linalg.norm(target_to_obj)
+ target_to_obj = float(np.linalg.norm(target_to_obj))
target_to_obj_init = (self.obj_init_pos - target) * scale
target_to_obj_init = np.linalg.norm(target_to_obj_init)
@@ -126,8 +137,8 @@ def compute_reward(self, action, obs):
margin=target_to_obj_init,
sigmoid="long_tail",
)
- tcp_opened = obs[3]
- tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
+ tcp_opened = float(obs[3])
+ tcp_to_obj = float(np.linalg.norm(obj - self.tcp_center))
object_grasped = self._gripper_caging_reward(
action,
@@ -143,7 +154,7 @@ def compute_reward(self, action, obs):
and tcp_opened > 0
and obj[2] - 0.01 > self.obj_init_pos[2]
):
- object_grasped = 1
+ object_grasped = 1.0
reward = reward_utils.hamacher_product(object_grasped, in_place)
if (
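`reward_utils.hamacher_product`, used just above to fuse the grasp and placement terms, is the Hamacher t-norm: a smooth AND over two [0, 1] signals. A sketch consistent with how it is used here (the packaged implementation in `metaworld.envs.mujoco.utils.reward_utils` is authoritative):

def hamacher_product(a: float, b: float) -> float:
    # 1 only when both inputs are 1; 0 whenever either is 0;
    # strictly increasing in each argument in between.
    denom = a + b - a * b
    return (a * b) / denom if denom > 0 else 0.0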
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_bin_picking_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_bin_picking_v2.py
index 979e1ff41..be9c1c077 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_bin_picking_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_bin_picking_v2.py
@@ -1,12 +1,15 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerBinPickingEnvV2(SawyerXYZEnv):
@@ -23,7 +26,10 @@ class SawyerBinPickingEnvV2(SawyerXYZEnv):
- (11/23/20) Updated reward function to new pick-place style
"""
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.07)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.21, 0.65, 0.02)
@@ -33,15 +39,11 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = np.array([0.1201, 0.701, +0.001])
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
-
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([-0.12, 0.7, 0.02]),
"hand_init_pos": np.array((0, 0.6, 0.2)),
@@ -51,30 +53,35 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.obj_init_angle = self.init_config["obj_init_angle"]
self.hand_init_pos = self.init_config["hand_init_pos"]
- self._target_to_obj_init = None
+ self._target_to_obj_init: float | None = None
self.hand_and_obj_space = Box(
np.hstack((self.hand_low, obj_low)),
np.hstack((self.hand_high, obj_high)),
+ dtype=np.float64,
)
self.goal_and_obj_space = Box(
np.hstack((goal_low[:2], obj_low[:2])),
np.hstack((goal_high[:2], obj_high[:2])),
+ dtype=np.float64,
)
- self.goal_space = Box(goal_low, goal_high)
+ self.goal_space = Box(goal_low, goal_high, dtype=np.float64)
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_bin_picking.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
near_object,
@@ -97,19 +104,19 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("objGeom")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("objGeom")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("obj").xquat
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config["obj_init_pos"]
@@ -117,7 +124,7 @@ def reset_model(self):
obj_height = self.get_body_com("obj")[2]
self.obj_init_pos = self._get_state_rand_vec()[:2]
- self.obj_init_pos = np.concatenate((self.obj_init_pos, [obj_height]))
+ self.obj_init_pos = np.concatenate([self.obj_init_pos, [obj_height]])
self._set_obj_xyz(self.obj_init_pos)
self._target_pos = self.get_body_com("bin_goal")
@@ -125,11 +132,17 @@ def reset_model(self):
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[Any]
+ ) -> tuple[float, bool, bool, float, float, float]:
+ assert (
+ self.obj_init_pos is not None and self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
+
hand = obs[:3]
obj = obs[4:7]
- target_to_obj = np.linalg.norm(obj - self._target_pos)
+ target_to_obj = float(np.linalg.norm(obj - self._target_pos))
if self._target_to_obj_init is None:
self._target_to_obj_init = target_to_obj
@@ -178,9 +191,9 @@ def compute_reward(self, action, obs):
)
reward = reward_utils.hamacher_product(object_grasped, in_place)
- near_object = np.linalg.norm(obj - hand) < 0.04
- pinched_without_obj = obs[3] < 0.43
- lifted = obj[2] - 0.02 > self.obj_init_pos[2]
+ near_object = bool(np.linalg.norm(obj - hand) < 0.04)
+ pinched_without_obj = bool(obs[3] < 0.43)
+ lifted = bool(obj[2] - 0.02 > self.obj_init_pos[2])
# Increase reward when properly grabbed obj
grasp_success = near_object and lifted and not pinched_without_obj
if grasp_success:
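The `bool(...)` wrappers above (and the `float(...)` wrappers on distances throughout this patch) exist because NumPy comparisons produce `np.bool_` and reductions produce NumPy scalar types, which static checkers will not accept where the new return annotations promise builtin `bool`/`float`. In isolation:

import numpy as np

raw = np.linalg.norm(np.ones(3))  # np.float64; the stubs type it as np.floating
near = raw < 0.04                 # np.bool_, not builtin bool
typed: tuple[float, bool] = (float(raw), bool(near))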
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_box_close_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_box_close_v2.py
index 3d653bd65..2dbc14b2b 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_box_close_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_box_close_v2.py
@@ -1,17 +1,23 @@
+from __future__ import annotations
+
+from typing import Any
+
import mujoco
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerBoxCloseEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.05, 0.5, 0.02)
@@ -20,15 +26,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.1, 0.8, 0.133)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0, 0.55, 0.02], dtype=np.float32),
"hand_init_pos": np.array((0, 0.6, 0.2), dtype=np.float32),
@@ -40,20 +43,23 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._target_to_obj_init = None
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
self.init_obj_quat = None
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_box.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
reward_grab,
@@ -75,19 +81,19 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("BoxHandleGeom")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("BoxHandleGeom")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("top_link")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("top_link").xquat
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self.init_config["obj_init_pos"]
self.obj_init_angle = self.init_config["obj_init_angle"]
@@ -96,34 +102,36 @@ def reset_model(self):
goal_pos = self._get_state_rand_vec()
while np.linalg.norm(goal_pos[:2] - goal_pos[-3:-1]) < 0.25:
goal_pos = self._get_state_rand_vec()
- self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
+ self.obj_init_pos = np.concatenate([goal_pos[:2], [self.obj_init_pos[-1]]])
self._target_pos = goal_pos[-3:]
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "boxbody")
- ] = np.concatenate((self._target_pos[:2], [box_height]))
+ self.model.body("boxbody").pos = np.concatenate(
+ [self._target_pos[:2], [box_height]]
+ )
for _ in range(self.frame_skip):
mujoco.mj_step(self.model, self.data)
self._set_obj_xyz(self.obj_init_pos)
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
@staticmethod
- def _reward_grab_effort(actions):
- return (np.clip(actions[3], -1, 1) + 1.0) / 2.0
+ def _reward_grab_effort(actions: npt.NDArray[Any]) -> float:
+ return float((np.clip(actions[3], -1, 1) + 1.0) / 2.0)
@staticmethod
- def _reward_quat(obs):
+ def _reward_quat(obs: npt.NDArray[np.float64]) -> float:
# Ideal upright lid has quat [.707, 0, 0, .707]
# Rather than deal with an angle between quaternions, just approximate:
ideal = np.array([0.707, 0, 0, 0.707])
- error = np.linalg.norm(obs[7:11] - ideal)
+ error = float(np.linalg.norm(obs[7:11] - ideal))
return max(1.0 - error / 0.2, 0.0)
@staticmethod
- def _reward_pos(obs, target_pos):
+ def _reward_pos(
+ obs: npt.NDArray[np.float64], target_pos: npt.NDArray[Any]
+ ) -> tuple[float, float]:
hand = obs[:3]
lid = obs[4:7] + np.array([0.0, 0.0, 0.02])
@@ -148,7 +156,7 @@ def _reward_pos(obs, target_pos):
)
# grab the lid's handle
in_place = reward_utils.tolerance(
- np.linalg.norm(hand - lid),
+ float(np.linalg.norm(hand - lid)),
bounds=(0, 0.02),
margin=0.5,
sigmoid="long_tail",
@@ -161,7 +169,7 @@ def _reward_pos(obs, target_pos):
a = 0.2 # Relative importance of just *trying* to lift the lid at all
b = 0.8 # Relative importance of placing the lid on the box
lifted = a * float(lid[2] > 0.04) + b * reward_utils.tolerance(
- np.linalg.norm(pos_error * error_scale),
+ float(np.linalg.norm(pos_error * error_scale)),
bounds=(0, 0.05),
margin=0.25,
sigmoid="long_tail",
@@ -169,7 +177,13 @@ def _reward_pos(obs, target_pos):
return ready_to_lift, lifted
- def compute_reward(self, actions, obs):
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, bool]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
+
reward_grab = SawyerBoxCloseEnvV2._reward_grab_effort(actions)
reward_quat = SawyerBoxCloseEnvV2._reward_quat(obs)
reward_steps = SawyerBoxCloseEnvV2._reward_pos(obs, self._target_pos)
@@ -182,7 +196,7 @@ def compute_reward(self, actions, obs):
)
# Override reward on success
- success = np.linalg.norm(obs[4:7] - self._target_pos) < 0.08
+ success = bool(np.linalg.norm(obs[4:7] - self._target_pos) < 0.08)
if success:
reward = 10.0
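The asserts now opening `compute_reward()` (and several properties) do double duty: `_target_pos` and `obj_init_pos` are declared `np.ndarray | None` and only populated by `reset_model()`, so the assert narrows the Optional for the type checker while failing fast with a clear message if reset was skipped. The pattern in isolation (toy class; attribute names match the envs):

from __future__ import annotations

import numpy as np

class Env:
    def __init__(self) -> None:
        self._target_pos: np.ndarray | None = None  # populated by reset_model()

    def reset_model(self) -> None:
        self._target_pos = np.zeros(3)

    def compute_reward(self) -> float:
        # Narrows `np.ndarray | None` to `np.ndarray` for the checker and
        # raises immediately if reset_model() was never called.
        assert self._target_pos is not None, "`reset_model()` must be called first."
        return float(np.linalg.norm(self._target_pos))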
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_topdown_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_topdown_v2.py
index 5bf16c140..5ba165ab7 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_topdown_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_topdown_v2.py
@@ -1,32 +1,34 @@
+from __future__ import annotations
+
+from typing import Any
+
import mujoco
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerButtonPressTopdownEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.8, 0.115)
obj_high = (0.1, 0.9, 0.115)
super().__init__(
- self.model_name,
hand_low=hand_low,
- hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ hand_high=hand_high,
+ **render_kwargs,
)
-
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.8, 0.115], dtype=np.float32),
"hand_init_pos": np.array([0, 0.4, 0.2], dtype=np.float32),
}
@@ -38,17 +40,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_button_press_topdown.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -70,32 +73,30 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("btnGeom")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("btnGeom")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("button") + np.array([0.0, 0.0, 0.193])
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("button").xquat
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
qvel[9] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = goal_pos
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = self.obj_init_pos
+ self.model.body("box").pos = self.obj_init_pos
mujoco.mj_forward(self.model, self.data)
self._target_pos = self._get_site_pos("hole")
@@ -104,13 +105,18 @@ def reset_model(self):
)
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
del action
obj = obs[4:7]
tcp = self.tcp_center
- tcp_to_obj = np.linalg.norm(obj - tcp)
- tcp_to_obj_init = np.linalg.norm(obj - self.init_tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
+ tcp_to_obj_init = float(np.linalg.norm(obj - self.init_tcp))
obj_to_target = abs(self._target_pos[2] - obj[2])
tcp_closed = 1 - obs[3]
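As in every V2 constructor in this patch, the explicit `render_mode`/`camera_name`/`camera_id` parameters become a `**render_kwargs` catch-all forwarded to `SawyerXYZEnv.__init__`, and the `self.model_name` positional argument disappears (presumably the base class now reads the `model_name` property itself). Call sites keep the same keywords; a sketch:

# Keywords are unchanged from the old signature; they now simply pass
# through **render_kwargs to the base class (camera name is illustrative).
env = SawyerButtonPressTopdownEnvV2(render_mode="rgb_array", camera_name="corner")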
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_topdown_wall_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_topdown_wall_v2.py
index 4cba6632d..242f650e1 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_topdown_wall_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_topdown_wall_v2.py
@@ -1,32 +1,35 @@
+from __future__ import annotations
+
+from typing import Any
+
import mujoco
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerButtonPressTopdownWallEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.8, 0.115)
obj_high = (0.1, 0.9, 0.115)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.8, 0.115], dtype=np.float32),
"hand_init_pos": np.array([0, 0.4, 0.2], dtype=np.float32),
}
@@ -38,17 +41,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_button_press_topdown_wall.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -71,34 +75,32 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("btnGeom")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("btnGeom")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("button") + np.array([0.0, 0.0, 0.193])
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("button").xquat
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
qvel[9] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = goal_pos
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = self.obj_init_pos
+ self.model.body("box").pos = self.obj_init_pos
mujoco.mj_forward(self.model, self.data)
self._target_pos = self._get_site_pos("hole")
@@ -108,13 +110,18 @@ def reset_model(self):
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
del action
obj = obs[4:7]
tcp = self.tcp_center
- tcp_to_obj = np.linalg.norm(obj - tcp)
- tcp_to_obj_init = np.linalg.norm(obj - self.init_tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
+ tcp_to_obj_init = float(np.linalg.norm(obj - self.init_tcp))
obj_to_target = abs(self._target_pos[2] - obj[2])
tcp_closed = 1 - obs[3]
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_v2.py
index b64278cde..0897de057 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_v2.py
@@ -1,32 +1,34 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerButtonPressEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.85, 0.115)
obj_high = (0.1, 0.9, 0.115)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0.0, 0.9, 0.115], dtype=np.float32),
"hand_init_pos": np.array([0, 0.4, 0.2], dtype=np.float32),
}
@@ -37,17 +39,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_button_press.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -70,36 +73,34 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("btnGeom")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("btnGeom")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("button") + np.array([0.0, -0.193, 0.0])
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("button").xquat
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
qvel[9] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config["obj_init_pos"]
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = goal_pos
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = self.obj_init_pos
- self._set_obj_xyz(0)
+ self.model.body("box").pos = self.obj_init_pos
+ self._set_obj_xyz(np.array(0))
self._target_pos = self._get_site_pos("hole")
self._obj_to_target_init = abs(
@@ -108,13 +109,18 @@ def reset_model(self):
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
del action
obj = obs[4:7]
tcp = self.tcp_center
- tcp_to_obj = np.linalg.norm(obj - tcp)
- tcp_to_obj_init = np.linalg.norm(obj - self.init_tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
+ tcp_to_obj_init = float(np.linalg.norm(obj - self.init_tcp))
obj_to_target = abs(self._target_pos[1] - obj[1])
tcp_closed = max(obs[3], 0.0)
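`self._set_obj_xyz(0)` becomes `self._set_obj_xyz(np.array(0))` above because the helper is now annotated to accept an `npt.NDArray`. A 0-d array satisfies the annotation and still assigns into a single `qpos` slot like a scalar:

import numpy as np

qpos = np.zeros(12)
qpos[9] = np.array(0)  # a 0-d array broadcasts into a scalar slot
assert qpos[9] == 0.0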
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_wall_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_wall_v2.py
index 1c9a05bb5..aa247a752 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_wall_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_button_press_wall_v2.py
@@ -1,32 +1,34 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerButtonPressWallEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.05, 0.85, 0.1149)
obj_high = (0.05, 0.9, 0.1151)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0.0, 0.9, 0.115], dtype=np.float32),
"hand_init_pos": np.array([0, 0.4, 0.2], dtype=np.float32),
}
@@ -38,18 +40,19 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_button_press_wall.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -72,26 +75,26 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("btnGeom")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("btnGeom")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("button") + np.array([0.0, -0.193, 0.0])
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("button").xquat
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
qvel[9] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config["obj_init_pos"]
@@ -99,11 +102,9 @@ def reset_model(self):
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = goal_pos
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = self.obj_init_pos
+ self.model.body("box").pos = self.obj_init_pos
- self._set_obj_xyz(0)
+ self._set_obj_xyz(np.array(0))
self._target_pos = self._get_site_pos("hole")
self._obj_to_target_init = abs(
@@ -112,13 +113,18 @@ def reset_model(self):
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
del action
obj = obs[4:7]
tcp = self.tcp_center
- tcp_to_obj = np.linalg.norm(obj - tcp)
- tcp_to_obj_init = np.linalg.norm(obj - self.init_tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
+ tcp_to_obj_init = float(np.linalg.norm(obj - self.init_tcp))
obj_to_target = abs(self._target_pos[1] - obj[1])
near_button = reward_utils.tolerance(
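`reward_utils.tolerance`, entering here and used throughout these reward functions, maps a non-negative distance into (0, 1]: full reward inside `bounds`, decaying over `margin` with the chosen sigmoid. Typical shape of the call (the distances shown are placeholders):

from metaworld.envs.mujoco.utils import reward_utils

near_button = reward_utils.tolerance(
    0.10,                 # current gripper-to-button distance (m)
    bounds=(0, 0.005),    # full reward once within 5 mm
    margin=0.30,          # decay length, e.g. the initial distance
    sigmoid="long_tail",  # heavy-tailed falloff far from the target
)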
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_button_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_button_v2.py
index 2c98b147b..3223639ab 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_button_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_button_v2.py
@@ -1,17 +1,22 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerCoffeeButtonEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
self.max_dist = 0.03
hand_low = (-0.5, 0.4, 0.05)
@@ -24,15 +29,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = obj_high + np.array([+0.001, -0.22 + self.max_dist, 0.301])
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.9, 0.28]),
"obj_init_angle": 0.3,
"hand_init_pos": np.array([0.0, 0.4, 0.2]),
@@ -43,17 +45,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_coffee.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -76,32 +79,33 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `_target_site_config`."
return [("coffee_goal", self._target_pos)]
def _get_id_main_object(self):
return None
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("buttonStart")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return np.array([1.0, 0.0, 0.0, 0.0])
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flatten()
qvel = self.data.qvel.flatten()
qpos[0:3] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self._get_state_rand_vec()
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "coffee_machine")
- ] = self.obj_init_pos
+ self.model.body("coffee_machine").pos = self.obj_init_pos
pos_mug = self.obj_init_pos + np.array([0.0, -0.22, 0.0])
self._set_obj_xyz(pos_mug)
@@ -111,13 +115,18 @@ def reset_model(self):
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
del action
obj = obs[4:7]
tcp = self.tcp_center
- tcp_to_obj = np.linalg.norm(obj - tcp)
- tcp_to_obj_init = np.linalg.norm(obj - self.init_tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
+ tcp_to_obj_init = float(np.linalg.norm(obj - self.init_tcp))
obj_to_target = abs(self._target_pos[1] - obj[1])
tcp_closed = max(obs[3], 0.0)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_pull_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_pull_v2.py
index 8586fccf1..71085d719 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_pull_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_pull_v2.py
@@ -1,18 +1,23 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerCoffeePullEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.05, 0.7, -0.001)
@@ -21,15 +26,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.1, 0.65, +0.001)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.75, 0.0]),
"obj_init_angle": 0.3,
"hand_init_pos": np.array([0.0, 0.4, 0.2]),
@@ -42,15 +44,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_coffee.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -61,7 +66,7 @@ def evaluate_state(self, obs, action):
) = self.compute_reward(action, obs)
success = float(obj_to_target <= 0.07)
near_object = float(tcp_to_obj <= 0.03)
- grasp_success = float(self.touching_object and (tcp_open > 0))
+ grasp_success = float(self.touching_main_object and (tcp_open > 0))
info = {
"success": success,
@@ -76,24 +81,30 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `_target_site_config`."
return [("mug_goal", self._target_pos)]
- def _get_pos_objects(self):
+ def _get_id_main_object(self) -> int:
+ return self.data.geom("mug").id
+
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("mug").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flatten()
qvel = self.data.qvel.flatten()
qpos[0:3] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
pos_mug_init, pos_mug_goal = np.split(self._get_state_rand_vec(), 2)
@@ -104,14 +115,18 @@ def reset_model(self):
self.obj_init_pos = pos_mug_init
pos_machine = pos_mug_init + np.array([0.0, 0.22, 0.0])
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "coffee_machine")
- ] = pos_machine
+ self.model.body("coffee_machine").pos = pos_machine
self._target_pos = pos_mug_goal
+ self.model.site("mug_goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
obj = obs[4:7]
target = self._target_pos.copy()
@@ -129,7 +144,7 @@ def compute_reward(self, action, obs):
sigmoid="long_tail",
)
tcp_opened = obs[3]
- tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
+ tcp_to_obj = float(np.linalg.norm(obj - self.tcp_center))
object_grasped = self._gripper_caging_reward(
action,
@@ -152,7 +167,7 @@ def compute_reward(self, action, obs):
reward,
tcp_to_obj,
tcp_opened,
- np.linalg.norm(obj - target), # recompute to avoid `scale` above
+ float(np.linalg.norm(obj - target)), # recompute to avoid `scale` above
object_grasped,
in_place,
)
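Both coffee mug envs read the mug orientation by converting the geom's rotation matrix through SciPy rather than reading a body quaternion. One caveat worth noting: SciPy returns quaternions in (x, y, z, w) order, whereas MuJoCo's native `xquat` is (w, x, y, z). Minimal reproduction:

import numpy as np
from scipy.spatial.transform import Rotation

xmat = np.eye(3)  # stand-in for data.geom("mug").xmat.reshape(3, 3)
quat = Rotation.from_matrix(xmat).as_quat()
print(quat)  # [0. 0. 0. 1.] -- identity, in SciPy's (x, y, z, w) order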
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_push_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_push_v2.py
index 6bd0c40c0..280469d74 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_push_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_coffee_push_v2.py
@@ -1,18 +1,23 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerCoffeePushEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.55, -0.001)
@@ -21,15 +26,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.05, 0.75, +0.001)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0.0, 0.6, 0.0]),
"hand_init_pos": np.array([0.0, 0.4, 0.2]),
@@ -42,15 +44,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_coffee.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -61,7 +66,7 @@ def evaluate_state(self, obs, action):
) = self.compute_reward(action, obs)
success = float(obj_to_target <= 0.07)
near_object = float(tcp_to_obj <= 0.03)
- grasp_success = float(self.touching_object and (tcp_open > 0))
+ grasp_success = float(self.touching_main_object and (tcp_open > 0))
info = {
"success": success,
@@ -76,24 +81,30 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `_target_site_config`."
return [("coffee_goal", self._target_pos)]
- def _get_pos_objects(self):
+ def _get_id_main_object(self) -> int:
+ return self.data.geom("mug").id
+
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("mug").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flatten()
qvel = self.data.qvel.flatten()
qpos[0:3] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
pos_mug_init, pos_mug_goal = np.split(self._get_state_rand_vec(), 2)
@@ -105,14 +116,18 @@ def reset_model(self):
pos_machine = pos_mug_goal + np.array([0.0, 0.22, 0.0])
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "coffee_machine")
- ] = pos_machine
+ self.model.body("coffee_machine").pos = pos_machine
self._target_pos = pos_mug_goal
+ self.model.site("mug_goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
obj = obs[4:7]
target = self._target_pos.copy()
@@ -130,7 +145,7 @@ def compute_reward(self, action, obs):
sigmoid="long_tail",
)
tcp_opened = obs[3]
- tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
+ tcp_to_obj = float(np.linalg.norm(obj - self.tcp_center))
object_grasped = self._gripper_caging_reward(
action,
@@ -153,7 +168,7 @@ def compute_reward(self, action, obs):
reward,
tcp_to_obj,
tcp_opened,
- np.linalg.norm(obj - target), # recompute to avoid `scale` above
+ float(np.linalg.norm(obj - target)), # recompute to avoid `scale` above
object_grasped,
in_place,
)
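A small behavioural addition rides along with the refactor above: several `reset_model()` bodies (basketball, box close, dial turn, both coffee mug envs) now also write the goal marker's site position, so the rendered marker tracks the freshly sampled target. The excerpted pattern, as it appears inside `reset_model()`:

self._target_pos = pos_mug_goal
self.model.site("mug_goal").pos = self._target_pos  # keep the visual marker in sync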
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py
index 5dfa86d37..b53555591 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py
@@ -1,19 +1,25 @@
+from __future__ import annotations
+
+from typing import Any
+
import mujoco
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerDialTurnEnvV2(SawyerXYZEnv):
- TARGET_RADIUS = 0.07
+ TARGET_RADIUS: float = 0.07
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.7, 0.0)
@@ -22,15 +28,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.1, 0.83, 0.0301)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.7, 0.0]),
"hand_init_pos": np.array([0, 0.6, 0.2], dtype=np.float32),
}
@@ -39,17 +42,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_dial.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -71,12 +75,12 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
dial_center = self.get_body_com("dial").copy()
dial_angle_rad = self.data.joint("knob_Joint_1").qpos
offset = np.array(
- [np.sin(dial_angle_rad), -np.cos(dial_angle_rad), 0], dtype=object
+ [np.sin(dial_angle_rad).item(), -np.cos(dial_angle_rad).item(), 0.0]
)
dial_radius = 0.05
@@ -84,10 +88,10 @@ def _get_pos_objects(self):
return dial_center + offset
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("dial").xquat
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config["obj_init_pos"]
@@ -97,21 +101,25 @@ def reset_model(self):
self.obj_init_pos = goal_pos[:3]
final_pos = goal_pos.copy() + np.array([0, 0.03, 0.03])
self._target_pos = final_pos
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "dial")
- ] = self.obj_init_pos
+ self.model.body("dial").pos = self.obj_init_pos
self.dial_push_position = self._get_pos_objects() + np.array([0.05, 0.02, 0.09])
+ self.model.site("goal").pos = self._target_pos
mujoco.mj_forward(self.model, self.data)
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
obj = self._get_pos_objects()
dial_push_position = self._get_pos_objects() + np.array([0.05, 0.02, 0.09])
tcp = self.tcp_center
target = self._target_pos.copy()
target_to_obj = obj - target
- target_to_obj = np.linalg.norm(target_to_obj)
+ target_to_obj = float(np.linalg.norm(target_to_obj).item())
target_to_obj_init = self.dial_push_position - target
target_to_obj_init = np.linalg.norm(target_to_obj_init)
@@ -123,8 +131,10 @@ def compute_reward(self, action, obs):
)
dial_reach_radius = 0.005
- tcp_to_obj = np.linalg.norm(dial_push_position - tcp)
- tcp_to_obj_init = np.linalg.norm(self.dial_push_position - self.init_tcp)
+ tcp_to_obj = float(np.linalg.norm(dial_push_position - tcp).item())
+ tcp_to_obj_init = float(
+ np.linalg.norm(self.dial_push_position - self.init_tcp).item()
+ )
reach = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, dial_reach_radius),
@@ -139,7 +149,7 @@ def compute_reward(self, action, obs):
reward = 10 * reward_utils.hamacher_product(reach, in_place)
return (
- reward[0],
+ reward,
tcp_to_obj,
tcp_opened,
target_to_obj,
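The dial-turn hunk above also fixes a subtle dtype bug: building the offset vector directly from 1-element `qpos` arrays used to force `dtype=object`. Extracting Python scalars with `.item()` first keeps the result a plain `float64` array. A standalone sketch, assuming a length-1 joint array:

```python
import numpy as np

angle = np.array([0.5])  # joint qpos comes back as a length-1 array

# Old pattern: mixing 1-element arrays with scalars forces dtype=object,
# which breaks downstream float arithmetic and type checking.
bad = np.array([np.sin(angle), -np.cos(angle), 0], dtype=object)

# New pattern: .item() extracts a Python scalar first.
good = np.array([np.sin(angle).item(), -np.cos(angle).item(), 0.0])

assert bad.dtype == object
assert good.dtype == np.float64
```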
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_disassemble_peg_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_disassemble_peg_v2.py
index ddd6cc43b..bea2c5619 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_disassemble_peg_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_disassemble_peg_v2.py
@@ -1,19 +1,25 @@
+from __future__ import annotations
+
+from typing import Any
+
import mujoco
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerNutDisassembleEnvV2(SawyerXYZEnv):
- WRENCH_HANDLE_LENGTH = 0.02
+ WRENCH_HANDLE_LENGTH: float = 0.02
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (0.0, 0.6, 0.025)
@@ -22,15 +28,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.1, 0.75, 0.1701)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0, 0.7, 0.025]),
"hand_init_pos": np.array((0, 0.4, 0.2), dtype=np.float32),
@@ -43,18 +46,22 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
self.goal_space = Box(
np.array(goal_low) + np.array([0.0, 0.0, 0.005]),
np.array(goal_high) + np.array([0.0, 0.0, 0.005]),
+ dtype=np.float64,
)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_assembly_peg.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
reward_grab,
@@ -76,16 +83,19 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `_target_site_config`."
return [("pegTop", self._target_pos)]
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("WrenchHandle")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("WrenchHandle")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("RoundNut-8")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("RoundNut").xquat
def _get_obs_dict(self):
@@ -93,7 +103,7 @@ def _get_obs_dict(self):
obs_dict["state_achieved_goal"] = self.get_body_com("RoundNut")
return obs_dict
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = np.array(self.init_config["obj_init_pos"])
@@ -107,33 +117,31 @@ def reset_model(self):
peg_pos = self.obj_init_pos + np.array([0.0, 0.0, 0.03])
peg_top_pos = self.obj_init_pos + np.array([0.0, 0.0, 0.08])
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "peg")
- ] = peg_pos
- self.model.site_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, "pegTop")
- ] = peg_top_pos
+ self.model.body("peg").pos = peg_pos
+ self.model.site("pegTop").pos = peg_top_pos
mujoco.mj_forward(self.model, self.data)
self._set_obj_xyz(self.obj_init_pos)
return self._get_obs()
@staticmethod
- def _reward_quat(obs):
+ def _reward_quat(obs: npt.NDArray[np.float64]) -> float:
# Ideal laid-down wrench has quat [.707, 0, 0, .707]
# Rather than deal with an angle between quaternions, just approximate:
ideal = np.array([0.707, 0, 0, 0.707])
- error = np.linalg.norm(obs[7:11] - ideal)
+ error = float(np.linalg.norm(obs[7:11] - ideal))
return max(1.0 - error / 0.4, 0.0)
@staticmethod
- def _reward_pos(wrench_center, target_pos):
+ def _reward_pos(
+ wrench_center: npt.NDArray[Any], target_pos: npt.NDArray[Any]
+ ) -> float:
pos_error = target_pos + np.array([0.0, 0.0, 0.1]) - wrench_center
a = 0.1 # Relative importance of just *trying* to lift the wrench
b = 0.9 # Relative importance of placing the wrench on the peg
lifted = wrench_center[2] > 0.02
in_place = a * float(lifted) + b * reward_utils.tolerance(
- np.linalg.norm(pos_error),
+ float(np.linalg.norm(pos_error)),
bounds=(0, 0.02),
margin=0.2,
sigmoid="long_tail",
@@ -141,7 +149,13 @@ def _reward_pos(wrench_center, target_pos):
return in_place
- def compute_reward(self, actions, obs):
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, bool]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
+
hand = obs[:3]
wrench = obs[4:7]
wrench_center = self._get_site_pos("RoundNut")
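The `assert self._target_pos is not None` guards added throughout serve two purposes: they fail fast if `reset_model()` has not run yet, and they narrow the `Optional` attribute type so mypy accepts the arithmetic that follows. A reduced sketch (class and method names are illustrative):

```python
from __future__ import annotations

import numpy as np
import numpy.typing as npt


class EnvSketch:
    """Illustrative only -- mirrors how `_target_pos` is declared Optional."""

    def __init__(self) -> None:
        self._target_pos: npt.NDArray[np.float64] | None = None

    def reset_model(self) -> None:
        self._target_pos = np.zeros(3)

    def compute_reward(self) -> float:
        # Without this assert, mypy flags the subtraction below because the
        # attribute may still be None; with it, the type is narrowed.
        assert (
            self._target_pos is not None
        ), "`reset_model()` must be called before `compute_reward()`."
        return float(np.linalg.norm(self._target_pos))
```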
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_close_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_close_v2.py
index 656329d73..42b22a5f6 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_close_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_close_v2.py
@@ -1,17 +1,23 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerDoorCloseEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (0.2, 0.65, 0.1499)
goal_high = (0.3, 0.75, 0.1501)
hand_low = (-0.5, 0.40, 0.05)
@@ -20,15 +26,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.1, 0.95, 0.15)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0.1, 0.95, 0.15], dtype=np.float32),
"hand_init_pos": np.array([-0.5, 0.6, 0.2], dtype=np.float32),
@@ -41,33 +44,32 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.door_qpos_adr = self.model.joint("doorjoint").qposadr.item()
self.door_qvel_adr = self.model.joint("doorjoint").dofadr.item()
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_door_pull.xml")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.data.geom("handle").xpos.copy()
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return Rotation.from_matrix(
self.data.geom("handle").xmat.reshape(3, 3)
).as_quat()
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.copy()
qvel = self.data.qvel.copy()
qpos[self.door_qpos_adr] = pos
qvel[self.door_qvel_adr] = 0
self.set_state(qpos.flatten(), qvel.flatten())
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.objHeight = self.data.geom("handle").xpos[2]
obj_pos = self._get_state_rand_vec()
@@ -79,12 +81,14 @@ def reset_model(self):
self.model.site("goal").pos = self._target_pos
# keep the door open after resetting initial positions
- self._set_obj_xyz(-1.5708)
-
+ self._set_obj_xyz(np.array(-1.5708))
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
reward, obj_to_target, in_place = self.compute_reward(action, obs)
info = {
"obj_to_target": obj_to_target,
@@ -97,15 +101,20 @@ def evaluate_state(self, obs, action):
}
return reward, info
- def compute_reward(self, actions, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float]:
+ assert (
+ self._target_pos is not None and self.hand_init_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
target = self._target_pos
- tcp_to_target = np.linalg.norm(tcp - target)
- # tcp_to_obj = np.linalg.norm(tcp - obj)
- obj_to_target = np.linalg.norm(obj - target)
+ tcp_to_target = float(np.linalg.norm(tcp - target))
+ # tcp_to_obj = float(np.linalg.norm(tcp - obj))
+ obj_to_target = float(np.linalg.norm(obj - target))
in_place_margin = np.linalg.norm(self.obj_init_pos - target)
in_place = reward_utils.tolerance(
@@ -115,7 +124,7 @@ def compute_reward(self, actions, obs):
sigmoid="gaussian",
)
- hand_margin = np.linalg.norm(self.hand_init_pos - obj) + 0.1
+ hand_margin = float(np.linalg.norm(self.hand_init_pos - obj)) + 0.1
hand_in_place = reward_utils.tolerance(
tcp_to_target,
bounds=(0, 0.25 * _TARGET_RADIUS),
@@ -128,4 +137,4 @@ def compute_reward(self, actions, obs):
if obj_to_target < _TARGET_RADIUS:
reward = 10
- return [reward, obj_to_target, hand_in_place]
+ return (reward, obj_to_target, hand_in_place)
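The door-close reward now returns a tuple rather than a list, matching the declared `tuple[float, float, float]`. A small sketch of why the distinction matters to the type checker:

```python
def compute_reward_sketch() -> tuple[float, float, float]:
    reward, obj_to_target, hand_in_place = 10.0, 0.01, 1.0
    # Returning `[reward, ...]` would have type `list[float]`: no arity
    # guarantee, and a type error against the declared tuple return.
    return (reward, obj_to_target, hand_in_place)


# Unpacking is now checked statically: exactly three floats come back.
r, dist, in_place = compute_reward_sketch()
```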
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_lock_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_lock_v2.py
index 34a1b4c5f..79d6a8dc1 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_lock_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_lock_v2.py
@@ -1,32 +1,35 @@
+from __future__ import annotations
+
+from typing import Any
+
import mujoco
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerDoorLockEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, -0.15)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.8, 0.15)
obj_high = (0.1, 0.85, 0.15)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.85, 0.15], dtype=np.float32),
"hand_init_pos": np.array([0, 0.6, 0.2], dtype=np.float32),
}
@@ -40,17 +43,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._lock_length = 0.1
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_door_lock.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -73,7 +77,10 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `_target_site_config`."
return [
("goal_lock", self._target_pos),
("goal_unlock", np.array([10.0, 10.0, 10.0])),
@@ -82,13 +89,13 @@ def _target_site_config(self):
def _get_id_main_object(self):
return None
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("lockStartLock")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("door_link").xquat
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
door_pos = self._get_state_rand_vec()
self.model.body("door").pos = door_pos
@@ -99,14 +106,19 @@ def reset_model(self):
self._target_pos = self.obj_init_pos + np.array([0.0, -0.04, -0.1])
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
del action
obj = obs[4:7]
tcp = self.get_body_com("leftpad")
scale = np.array([0.25, 1.0, 0.5])
- tcp_to_obj = np.linalg.norm((obj - tcp) * scale)
- tcp_to_obj_init = np.linalg.norm((obj - self.init_left_pad) * scale)
+ tcp_to_obj = float(np.linalg.norm((obj - tcp) * scale))
+ tcp_to_obj_init = float(np.linalg.norm((obj - self.init_left_pad) * scale))
obj_to_target = abs(self._target_pos[2] - obj[2])
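Every `Box` in the patch now pins `dtype=np.float64`. Gymnasium's `Box` defaults to `float32` and warns about lowering bound precision when handed `float64` arrays, so the explicit dtype keeps the spaces consistent with the environments' float64 state. A minimal sketch (the bounds are illustrative):

```python
import numpy as np
from gymnasium.spaces import Box

low = np.array([-0.1, 0.8, 0.15])
high = np.array([0.1, 0.85, 0.15])

# Explicit dtype: the space's sample/contains checks now agree with the
# float64 vectors produced by the environment instead of silently
# casting bounds down to float32.
space = Box(low, high, dtype=np.float64)

assert space.dtype == np.float64
assert space.contains(np.array([0.0, 0.82, 0.15]))
```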
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_unlock_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_unlock_v2.py
index ed18e6bfb..694225dec 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_unlock_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_unlock_v2.py
@@ -1,16 +1,22 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerDoorUnlockEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, -0.15)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.8, 0.15)
@@ -19,15 +25,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.2, 0.7, 0.2111)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.85, 0.15]),
"hand_init_pos": np.array([0, 0.6, 0.2], dtype=np.float32),
}
@@ -38,17 +41,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._lock_length = 0.1
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_door_lock.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -71,7 +75,10 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `_target_site_config`."
return [
("goal_unlock", self._target_pos),
("goal_lock", np.array([10.0, 10.0, 10.0])),
@@ -80,30 +87,35 @@ def _target_site_config(self):
def _get_id_main_object(self):
return None
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("lockStartUnlock")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("door_link").xquat
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
qvel[9] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.model.body("door").pos = self._get_state_rand_vec()
- self._set_obj_xyz(1.5708)
+ self._set_obj_xyz(np.array(1.5708))
self.obj_init_pos = self.data.body("lock_link").xpos
self._target_pos = self.obj_init_pos + np.array([0.1, -0.04, 0.0])
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
del action
gripper = obs[:3]
lock = obs[4:7]
@@ -119,13 +131,13 @@ def compute_reward(self, action, obs):
# end in itself. Make sure to devalue it compared to the value of
# actually unlocking the lock
ready_to_push = reward_utils.tolerance(
- np.linalg.norm(shoulder_to_lock),
+ float(np.linalg.norm(shoulder_to_lock)),
bounds=(0, 0.02),
margin=np.linalg.norm(shoulder_to_lock_init),
sigmoid="long_tail",
)
- obj_to_target = abs(self._target_pos[0] - lock[0])
+ obj_to_target = abs(float(self._target_pos[0] - lock[0]))
pushed = reward_utils.tolerance(
obj_to_target,
bounds=(0, 0.005),
@@ -137,7 +149,7 @@ def compute_reward(self, action, obs):
return (
reward,
- np.linalg.norm(shoulder_to_lock),
+ float(np.linalg.norm(shoulder_to_lock)),
obs[3],
obj_to_target,
ready_to_push,
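`reward_utils.hamacher_product`, used above to combine the reach and push terms, is a Hamacher t-norm over rewards in [0, 1]. A hedged sketch of the idea; the real helper in `metaworld/envs/mujoco/utils/reward_utils.py` may handle edge cases differently:

```python
def hamacher_product_sketch(a: float, b: float) -> float:
    """Hamacher t-norm: a smooth AND over rewards in [0, 1].

    Returns 0 whenever either input is 0 and 1 only when both are 1,
    so neither term can be neglected by maximizing the other.
    """
    assert 0.0 <= a <= 1.0 and 0.0 <= b <= 1.0
    denominator = a + b - a * b
    return (a * b) / denominator if denominator > 0.0 else 0.0


assert hamacher_product_sketch(1.0, 1.0) == 1.0
assert hamacher_product_sketch(0.7, 0.0) == 0.0
```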
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_v2.py
index 5901361f0..1edd403ee 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_door_v2.py
@@ -1,17 +1,23 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerDoorEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (0.0, 0.85, 0.15)
@@ -20,16 +26,13 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (-0.2, 0.5, 0.1501)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
- "obj_init_angle": np.array([0.3]),
+ self.init_config: InitConfigDict = {
+ "obj_init_angle": 0.3,
"obj_init_pos": np.array([0.1, 0.95, 0.15]),
"hand_init_pos": np.array([0, 0.6, 0.2]),
}
@@ -43,17 +46,19 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.door_qvel_adr = self.model.joint("doorjoint").dofadr.item()
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_door_pull.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
+ assert self._target_pos is not None
(
reward,
reward_grab,
@@ -76,25 +81,25 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.data.geom("handle").xpos.copy()
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return Rotation.from_matrix(
self.data.geom("handle").xmat.reshape(3, 3)
).as_quat()
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.copy()
qvel = self.data.qvel.copy()
qpos[self.door_qpos_adr] = pos
qvel[self.door_qvel_adr] = 0
self.set_state(qpos.flatten(), qvel.flatten())
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.objHeight = self.data.geom("handle").xpos[2]
@@ -103,20 +108,21 @@ def reset_model(self):
self.model.body("door").pos = self.obj_init_pos
self.model.site("goal").pos = self._target_pos
- self._set_obj_xyz(0)
+ self._set_obj_xyz(np.array(0))
+ assert self._target_pos is not None
self.maxPullDist = np.linalg.norm(
self.data.geom("handle").xpos[:-1] - self._target_pos[:-1]
)
self.target_reward = 1000 * self.maxPullDist + 1000 * 2
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
@staticmethod
- def _reward_grab_effort(actions):
- return (np.clip(actions[3], -1, 1) + 1.0) / 2.0
+ def _reward_grab_effort(actions: npt.NDArray[Any]) -> float:
+ return float((np.clip(actions[3], -1, 1) + 1.0) / 2.0)
@staticmethod
- def _reward_pos(obs, theta):
+ def _reward_pos(obs: npt.NDArray[Any], theta: float) -> tuple[float, float]:
hand = obs[:3]
door = obs[4:7] + np.array([-0.05, 0, 0])
@@ -141,7 +147,7 @@ def _reward_pos(obs, theta):
)
# move the hand to a position between the handle and the main door body
in_place = reward_utils.tolerance(
- np.linalg.norm(hand - door - np.array([0.05, 0.03, -0.01])),
+ float(np.linalg.norm(hand - door - np.array([0.05, 0.03, -0.01]))),
bounds=(0, threshold / 2.0),
margin=0.5,
sigmoid="long_tail",
@@ -161,8 +167,13 @@ def _reward_pos(obs, theta):
return ready_to_open, opened
- def compute_reward(self, actions, obs):
- theta = self.data.joint("doorjoint").qpos
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
+ theta = float(self.data.joint("doorjoint").qpos.item())
reward_grab = SawyerDoorEnvV2._reward_grab_effort(actions)
reward_steps = SawyerDoorEnvV2._reward_pos(obs, theta)
@@ -175,7 +186,6 @@ def compute_reward(self, actions, obs):
)
# Override reward on success flag
- reward = reward[0]
if abs(obs[4] - self._target_pos[0]) <= 0.08:
reward = 10.0
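Another recurring simplification: `model.body_pos[mujoco.mj_name2id(...)]` becomes the named accessor `model.body(name).pos`, which binds the name lookup and the field in one step. A self-contained sketch with a throwaway model (the XML here is illustrative, not from the repo):

```python
import mujoco
import numpy as np

XML = """
<mujoco>
  <worldbody>
    <body name="door" pos="0 0 0">
      <geom type="box" size="0.1 0.1 0.1"/>
    </body>
  </worldbody>
</mujoco>
"""

model = mujoco.MjModel.from_xml_string(XML)

# Old pattern: two steps, with a bare integer index in between.
body_id = mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_BODY, "door")
model.body_pos[body_id] = np.array([0.1, 0.95, 0.15])

# New pattern: one named accessor writing the same underlying field.
model.body("door").pos = np.array([0.1, 0.95, 0.15])

assert np.allclose(model.body_pos[body_id], [0.1, 0.95, 0.15])
```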
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_drawer_close_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_drawer_close_v2.py
index 6fdd3ee3c..123e001af 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_drawer_close_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_drawer_close_v2.py
@@ -1,40 +1,37 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerDrawerCloseEnvV2(SawyerXYZEnv):
- _TARGET_RADIUS = 0.04
+ _TARGET_RADIUS: float = 0.04
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.9, 0.0)
obj_high = (0.1, 0.9, 0.0)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
- "obj_init_angle": np.array(
- [
- 0.3,
- ],
- dtype=np.float32,
- ),
+ self.init_config: InitConfigDict = {
+ "obj_init_angle": 0.3,
"obj_init_pos": np.array([0.0, 0.9, 0.0], dtype=np.float32),
"hand_init_pos": np.array([0, 0.6, 0.2], dtype=np.float32),
}
@@ -46,20 +43,21 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.maxDist = 0.15
self.target_reward = 1000 * self.maxDist + 1000 * 2
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_drawer.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -81,37 +79,40 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("drawer_link") + np.array([0.0, -0.16, 0.05])
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return np.zeros(4)
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
# Compute nightstand position
self.obj_init_pos = self._get_state_rand_vec()
# Set mujoco body to computed position
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "drawer")
- ] = self.obj_init_pos
+ self.model.body("drawer").pos = self.obj_init_pos
# Set _target_pos to current drawer position (closed)
self._target_pos = self.obj_init_pos + np.array([0.0, -0.16, 0.09])
# Pull drawer out all the way and mark its starting position
- self._set_obj_xyz(-self.maxDist)
+ self._set_obj_xyz(np.array(-self.maxDist))
self.obj_init_pos = self._get_pos_objects()
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None and self.hand_init_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
obj = obs[4:7]
tcp = self.tcp_center
@@ -130,7 +131,7 @@ def compute_reward(self, action, obs):
)
handle_reach_radius = 0.005
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
tcp_to_obj_init = np.linalg.norm(self.obj_init_pos - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
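The `_set_obj_xyz` helpers above all follow the same idiom: copy `qpos`/`qvel`, overwrite one joint coordinate, zero its velocity, and push the state back. A generic sketch of that pattern, assuming a mujoco-style `env` exposing `data.qpos`, `data.qvel`, and a gym-style `set_state(qpos, qvel)`:

```python
from typing import Any


def set_scalar_joint_sketch(env: Any, qpos_index: int, value: float) -> None:
    """Illustrative version of the `_set_obj_xyz` pattern used above."""
    qpos = env.data.qpos.flat.copy()  # never mutate the live buffers in place
    qvel = env.data.qvel.flat.copy()
    qpos[qpos_index] = value
    qvel[qpos_index] = 0.0            # kill any residual joint velocity
    env.set_state(qpos, qvel)
```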
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_drawer_open_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_drawer_open_v2.py
index 67daebd50..638794291 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_drawer_open_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_drawer_open_v2.py
@@ -1,38 +1,35 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerDrawerOpenEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.9, 0.0)
obj_high = (0.1, 0.9, 0.0)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
- "obj_init_angle": np.array(
- [
- 0.3,
- ],
- dtype=np.float32,
- ),
+ self.init_config: InitConfigDict = {
+ "obj_init_angle": 0.3,
"obj_init_pos": np.array([0.0, 0.9, 0.0], dtype=np.float32),
"hand_init_pos": np.array([0, 0.6, 0.2], dtype=np.float32),
}
@@ -44,20 +41,21 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.maxDist = 0.2
self.target_reward = 1000 * self.maxDist + 1000 * 2
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_drawer.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
gripper_error,
@@ -79,39 +77,41 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("objGeom")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("objGeom")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("drawer_link") + np.array([0.0, -0.16, 0.0])
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("drawer_link").xquat
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.prev_obs = self._get_curr_obs_combined_no_goal()
# Compute nightstand position
self.obj_init_pos = self._get_state_rand_vec()
# Set mujoco body to computed position
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "drawer")
- ] = self.obj_init_pos
+ self.model.body("drawer").pos = self.obj_init_pos
# Set _target_pos to current drawer position (closed) minus an offset
self._target_pos = self.obj_init_pos + np.array(
[0.0, -0.16 - self.maxDist, 0.09]
)
- mujoco.mj_forward(self.model, self.data)
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
gripper = obs[:3]
handle = obs[4:7]
- handle_error = np.linalg.norm(handle - self._target_pos)
+ handle_error = float(np.linalg.norm(handle - self._target_pos))
reward_for_opening = reward_utils.tolerance(
handle_error, bounds=(0, 0.02), margin=self.maxDist, sigmoid="long_tail"
@@ -128,7 +128,7 @@ def compute_reward(self, action, obs):
gripper_error_init = (handle_pos_init - self.init_tcp) * scale
reward_for_caging = reward_utils.tolerance(
- np.linalg.norm(gripper_error),
+ float(np.linalg.norm(gripper_error)),
bounds=(0, 0.01),
margin=np.linalg.norm(gripper_error_init),
sigmoid="long_tail",
@@ -139,7 +139,7 @@ def compute_reward(self, action, obs):
return (
reward,
- np.linalg.norm(handle - gripper),
+ float(np.linalg.norm(handle - gripper)),
obs[3],
handle_error,
reward_for_caging,
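`reward_utils.tolerance(x, bounds, margin, sigmoid=...)`, Meta-World's shaping primitive in the style of dm_control's rewards module, returns 1 inside `bounds` and decays toward 0 over `margin`. A rough, gaussian-only stand-in to make the semantics concrete (the real helper supports several sigmoids, including the `long_tail` used throughout this patch):

```python
from __future__ import annotations

import numpy as np


def tolerance_sketch(
    x: float,
    bounds: tuple[float, float],
    margin: float,
    value_at_margin: float = 0.1,
) -> float:
    """Rough gaussian-only stand-in for `reward_utils.tolerance`."""
    lower, upper = bounds
    if lower <= x <= upper:
        return 1.0  # full reward inside the target band
    if margin <= 0.0:
        return 0.0
    # Normalized distance past the nearest bound; equals 1 at the margin.
    distance = (lower - x if x < lower else x - upper) / margin
    scale = np.sqrt(-2.0 * np.log(value_at_margin))
    return float(np.exp(-0.5 * (distance * scale) ** 2))


assert tolerance_sketch(0.01, bounds=(0.0, 0.02), margin=0.2) == 1.0
assert abs(tolerance_sketch(0.22, bounds=(0.0, 0.02), margin=0.2) - 0.1) < 1e-9
```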
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_faucet_close_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_faucet_close_v2.py
index 6a14b03e2..8ce002515 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_faucet_close_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_faucet_close_v2.py
@@ -1,34 +1,37 @@
+from __future__ import annotations
+
+from typing import Any
+
import mujoco
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerFaucetCloseEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, -0.15)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.8, 0.0)
obj_high = (0.1, 0.85, 0.0)
self._handle_length = 0.175
- self._target_radius = 0.07
+ self._target_radius: float = 0.07
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.8, 0.0]),
"hand_init_pos": np.array([0.0, 0.4, 0.2]),
}
@@ -39,17 +42,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_faucet.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -72,39 +76,46 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `_target_site_config`."
return [
("goal_close", self._target_pos),
("goal_open", np.array([10.0, 10.0, 10.0])),
]
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("faucetBase").xquat
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("handleStartClose") + np.array([0.0, 0.0, -0.01])
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
# Compute faucet position
self.obj_init_pos = self._get_state_rand_vec()
# Set mujoco body to computed position
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "faucetBase")
- ] = self.obj_init_pos
+ self.model.body("faucetBase").pos = self.obj_init_pos
self._target_pos = self.obj_init_pos + np.array(
[-self._handle_length, 0.0, 0.125]
)
mujoco.mj_forward(self.model, self.data)
+ self.model.site("goal_close").pos = self._target_pos
return self._get_obs()
- def _reset_hand(self):
- super()._reset_hand()
+ def _reset_hand(self, steps: int = 50) -> None:
+ super()._reset_hand(steps=steps)
self.reachCompleted = False
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
obj = obs[4:7]
tcp = self.tcp_center
target = self._target_pos.copy()
@@ -122,7 +133,7 @@ def compute_reward(self, action, obs):
)
faucet_reach_radius = 0.01
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
tcp_to_obj_init = np.linalg.norm(self.obj_init_pos - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
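The `_reset_hand` override above gains `steps: int = 50` so its signature matches the base class; under mypy, an override that silently drops parameters fails the substitutability check. A reduced sketch:

```python
class BaseSketch:
    def _reset_hand(self, steps: int = 50) -> None:
        self.settle_steps = steps


class GoodChild(BaseSketch):
    # Same signature as the base: mypy accepts the override, and callers
    # holding a BaseSketch reference can still pass `steps`.
    def _reset_hand(self, steps: int = 50) -> None:
        super()._reset_hand(steps=steps)
        self.reachCompleted = False


class BadChild(BaseSketch):
    # Dropping `steps` breaks substitutability; mypy flags this override.
    def _reset_hand(self) -> None:  # type: ignore[override]
        ...
```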
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_faucet_open_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_faucet_open_v2.py
index 400e0270a..e9d8d4b6d 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_faucet_open_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_faucet_open_v2.py
@@ -1,34 +1,36 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerFaucetOpenEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, -0.15)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.05, 0.8, 0.0)
obj_high = (0.05, 0.85, 0.0)
self._handle_length = 0.175
- self._target_radius = 0.07
+ self._target_radius: float = 0.07
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.8, 0.0]),
"hand_init_pos": np.array([0.0, 0.4, 0.2]),
}
@@ -39,17 +41,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_faucet.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -72,39 +75,45 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `_target_site_config`."
return [
("goal_open", self._target_pos),
("goal_close", np.array([10.0, 10.0, 10.0])),
]
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("handleStartOpen") + np.array([0.0, 0.0, -0.01])
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("faucetBase").xquat
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
# Compute faucet position
self.obj_init_pos = self._get_state_rand_vec()
# Set mujoco body to computed position
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "faucetBase")
- ] = self.obj_init_pos
+ self.model.body("faucetBase").pos = self.obj_init_pos
self._target_pos = self.obj_init_pos + np.array(
[+self._handle_length, 0.0, 0.125]
)
- mujoco.mj_forward(self.model, self.data)
+ self.model.site("goal_open").pos = self._target_pos
return self._get_obs()
- def _reset_hand(self):
- super()._reset_hand()
+ def _reset_hand(self, steps: int = 50) -> None:
+ super()._reset_hand(steps=steps)
self.reachCompleted = False
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
del action
obj = obs[4:7] + np.array([-0.04, 0.0, 0.03])
tcp = self.tcp_center
@@ -123,7 +132,7 @@ def compute_reward(self, action, obs):
)
faucet_reach_radius = 0.01
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
tcp_to_obj_init = np.linalg.norm(self.obj_init_pos - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
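Every constructor in this patch trades the three explicit render parameters for `**render_kwargs` forwarded to `SawyerXYZEnv`, so a new render option only needs to be added in one place. (One quirk worth noting: a `**kwargs` annotation describes each *value*, so `dict[str, Any] | None` types every kwarg as an optional dict; plain `Any` would be the more accurate choice.) A reduced sketch of the shape of the change, with placeholder class names:

```python
from __future__ import annotations

from typing import Any


class BaseEnvSketch:
    def __init__(
        self,
        hand_low: tuple[float, ...] = (0, 0, 0),
        hand_high: tuple[float, ...] = (1, 1, 1),
        render_mode: str | None = None,
        camera_name: str | None = None,
        camera_id: int | None = None,
    ) -> None:
        self.render_mode = render_mode
        self.camera_name = camera_name
        self.camera_id = camera_id


class TaskEnvSketch(BaseEnvSketch):
    def __init__(self, **render_kwargs: Any) -> None:
        # Render options pass through untouched; adding a new one to the
        # base class no longer requires editing every task constructor.
        super().__init__(
            hand_low=(-0.5, 0.4, 0.05),
            hand_high=(0.5, 1, 0.5),
            **render_kwargs,
        )


env = TaskEnvSketch(render_mode="rgb_array", camera_name="corner")
```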
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hammer_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hammer_v2.py
index 620d66175..b550520fb 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hammer_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hammer_v2.py
@@ -1,19 +1,24 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import HammerInitConfigDict
class SawyerHammerEnvV2(SawyerXYZEnv):
HAMMER_HANDLE_LENGTH = 0.14
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.4, 0.0)
@@ -22,15 +27,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.2401, 0.7401, 0.111)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: HammerInitConfigDict = {
"hammer_init_pos": np.array([0, 0.5, 0.0]),
"hand_init_pos": np.array([0, 0.4, 0.2]),
}
@@ -38,17 +40,21 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.hammer_init_pos = self.init_config["hammer_init_pos"]
self.obj_init_pos = self.hammer_init_pos.copy()
self.hand_init_pos = self.init_config["hand_init_pos"]
- self.nail_init_pos = None
+ self.nail_init_pos: npt.NDArray[Any] | None = None
- self._random_reset_space = Box(np.array(obj_low), np.array(obj_high))
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self._random_reset_space = Box(
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
+ )
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_hammer.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
reward_grab,
@@ -69,33 +75,31 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("HammerHandle")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("HammerHandle")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return np.hstack(
(self.get_body_com("hammer").copy(), self.get_body_com("nail_link").copy())
)
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return np.hstack(
(self.data.body("hammer").xquat, self.data.body("nail_link").xquat)
)
- def _set_hammer_xyz(self, pos):
+ def _set_hammer_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
# Set position of box & nail (these are not randomized)
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = np.array([0.24, 0.85, 0.0])
+ self.model.body("box").pos = np.array([0.24, 0.85, 0.0])
# Update _target_pos
self._target_pos = self._get_site_pos("goal")
@@ -104,15 +108,14 @@ def reset_model(self):
self.nail_init_pos = self._get_site_pos("nailHead")
self.obj_init_pos = self.hammer_init_pos.copy()
self._set_hammer_xyz(self.hammer_init_pos)
-
return self._get_obs()
@staticmethod
- def _reward_quat(obs):
+ def _reward_quat(obs: npt.NDArray[np.float64]) -> float:
# Ideal laid-down hammer has quat [1, 0, 0, 0]
# Rather than deal with an angle between quaternions, just approximate:
ideal = np.array([1.0, 0.0, 0.0, 0.0])
- error = np.linalg.norm(obs[7:11] - ideal)
+ error = float(np.linalg.norm(obs[7:11] - ideal).item())
return max(1.0 - error / 0.4, 0.0)
@staticmethod
@@ -131,7 +134,9 @@ def _reward_pos(hammer_head, target_pos):
return in_place
- def compute_reward(self, actions, obs):
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, bool]:
hand = obs[:3]
hammer = obs[4:7]
hammer_head = hammer + np.array([0.16, 0.06, 0.0])
@@ -161,7 +166,7 @@ def compute_reward(self, actions, obs):
reward = (2.0 * reward_grab + 6.0 * reward_in_place) * reward_quat
# Override reward on success. We check that reward is above a threshold
# because this env's success metric could be hacked easily
- success = self.data.joint("NailSlideJoint").qpos > 0.09
+ success = bool(self.data.joint("NailSlideJoint").qpos > 0.09)
if success and reward > 5.0:
reward = 10.0
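`success` in the hammer reward is now `bool(...)`-cast because comparing a length-1 `qpos` array against a scalar yields a numpy array, not a Python bool. A standalone sketch:

```python
import numpy as np

qpos = np.array([0.12])      # mujoco joint qpos is always an array

raw = qpos > 0.09            # numpy array([ True]), not a Python bool
success = bool(qpos > 0.09)  # fine for a 1-element array

assert isinstance(raw, np.ndarray)
assert type(success) is bool and success
# Note: bool() on an array with more than one element raises ValueError,
# so this cast is only safe because the nail joint has a single DoF.
```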
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hand_insert_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hand_insert_v2.py
index 1a64fee97..bd0ba298f 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hand_insert_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_hand_insert_v2.py
@@ -1,18 +1,24 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerHandInsertEnvV2(SawyerXYZEnv):
- TARGET_RADIUS = 0.05
+ TARGET_RADIUS: float = 0.05
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, -0.15)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.6, 0.05)
@@ -21,15 +27,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.04, 0.88, -0.0199)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.6, 0.05]),
"obj_init_angle": 0.3,
"hand_init_pos": np.array([0, 0.6, 0.2], dtype=np.float32),
@@ -42,15 +45,20 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_table_with_hole.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
+ assert self.obj_init_pos is not None
+
obj = obs[4:7]
(
@@ -78,17 +86,16 @@ def evaluate_state(self, obs, action):
return reward, info
- @property
- def _get_id_main_object(self):
+ def _get_id_main_object(self) -> int:
return self.model.geom("objGeom").id
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("obj").xquat
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.prev_obs = self._get_curr_obs_combined_no_goal()
self.obj_init_angle = self.init_config["obj_init_angle"]
@@ -97,17 +104,24 @@ def reset_model(self):
goal_pos = self._get_state_rand_vec()
while np.linalg.norm(goal_pos[:2] - goal_pos[-3:-1]) < 0.15:
goal_pos = self._get_state_rand_vec()
- self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
+ assert self.obj_init_pos is not None
+ self.obj_init_pos = np.concatenate([goal_pos[:2], [self.obj_init_pos[-1]]])
self._target_pos = goal_pos[-3:]
self._set_obj_xyz(self.obj_init_pos)
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
obj = obs[4:7]
- target_to_obj = np.linalg.norm(obj - self._target_pos)
- target_to_obj_init = np.linalg.norm(self.obj_init_pos - self._target_pos)
+ target_to_obj = float(np.linalg.norm(obj - self._target_pos))
+ target_to_obj_init = float(np.linalg.norm(self.obj_init_pos - self._target_pos))
in_place = reward_utils.tolerance(
target_to_obj,
@@ -128,7 +142,7 @@ def compute_reward(self, action, obs):
reward = reward_utils.hamacher_product(object_grasped, in_place)
tcp_opened = obs[3]
- tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
+ tcp_to_obj = float(np.linalg.norm(obj - self.tcp_center))
if tcp_to_obj < 0.02 and tcp_opened > 0:
reward += 1.0 + 7.0 * in_place
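The hand-insert `reset_model` above resamples until the object and goal are at least 15 cm apart in the plane, a plain rejection-sampling loop. A compact sketch with illustrative bounds:

```python
import numpy as np

rng = np.random.default_rng(0)


def sample_obj_and_goal(min_separation: float = 0.15) -> np.ndarray:
    """Resample a 6-vector (obj xyz + goal xyz) until the xy distance
    between obj (dims 0:2) and goal (dims 3:5) clears `min_separation`."""
    vec = rng.uniform(low=-0.1, high=0.1, size=6)  # illustrative bounds
    while np.linalg.norm(vec[:2] - vec[-3:-1]) < min_separation:
        vec = rng.uniform(low=-0.1, high=0.1, size=6)
    return vec


sample = sample_obj_and_goal()
assert np.linalg.norm(sample[:2] - sample[3:5]) >= 0.15
```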
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_press_side_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_press_side_v2.py
index 2d689a333..682301843 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_press_side_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_press_side_v2.py
@@ -1,13 +1,15 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerHandlePressSideEnvV2(SawyerXYZEnv):
@@ -24,24 +26,24 @@ class SawyerHandlePressSideEnvV2(SawyerXYZEnv):
- (6/30/20) Increased goal's Z coordinate by 0.01 in XML
"""
- TARGET_RADIUS = 0.02
+ TARGET_RADIUS: float = 0.02
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1.0, 0.5)
obj_low = (-0.35, 0.65, -0.001)
obj_high = (-0.25, 0.75, 0.001)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([-0.3, 0.7, 0.0]),
"hand_init_pos": np.array(
(0, 0.6, 0.2),
@@ -55,17 +57,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_handle_press_sideways.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -74,7 +77,6 @@ def evaluate_state(self, obs, action):
object_grasped,
in_place,
) = self.compute_reward(action, obs)
-
info = {
"success": float(target_to_obj <= self.TARGET_RADIUS),
"near_object": float(tcp_to_obj <= 0.05),
@@ -88,44 +90,47 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("handleStart")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return np.zeros(4)
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
qvel[9] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self._get_state_rand_vec()
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = self.obj_init_pos
- self._set_obj_xyz(-0.001)
+ self.model.body("box").pos = self.obj_init_pos
+ self._set_obj_xyz(np.array(-0.001))
self._target_pos = self._get_site_pos("goalPress")
self._handle_init_pos = self._get_pos_objects()
return self._get_obs()
- def compute_reward(self, actions, obs):
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
del actions
obj = self._get_pos_objects()
tcp = self.tcp_center
target = self._target_pos.copy()
target_to_obj = obj[2] - target[2]
- target_to_obj = np.linalg.norm(target_to_obj)
+ target_to_obj = float(np.linalg.norm(target_to_obj))
target_to_obj_init = self._handle_init_pos[2] - target[2]
target_to_obj_init = np.linalg.norm(target_to_obj_init)
@@ -137,7 +142,7 @@ def compute_reward(self, actions, obs):
)
handle_radius = 0.02
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
tcp_to_obj_init = np.linalg.norm(self._handle_init_pos - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
@@ -149,6 +154,6 @@ def compute_reward(self, actions, obs):
object_grasped = reach
reward = reward_utils.hamacher_product(reach, in_place)
- reward = 1 if target_to_obj <= self.TARGET_RADIUS else reward
+ reward = 1.0 if target_to_obj <= self.TARGET_RADIUS else reward
reward *= 10
return (reward, tcp_to_obj, tcp_opened, target_to_obj, object_grasped, in_place)
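
This file also swaps every `mujoco.mj_name2id(...)` lookup for the named accessors of the official `mujoco` bindings. Both forms address the same memory; a small standalone check (the XML is a throwaway model, not a Metaworld asset):

```python
import mujoco
import numpy as np

model = mujoco.MjModel.from_xml_string(
    """
    <mujoco>
      <worldbody>
        <body name="box" pos="0 0 0.1">
          <geom type="box" size="0.05 0.05 0.05"/>
        </body>
      </worldbody>
    </mujoco>
    """
)

# Old style: resolve the integer id, then index the raw array.
body_id = mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_BODY, "box")
old_pos = model.body_pos[body_id]

# New style: the named accessor returns a view over the same storage.
new_pos = model.body("box").pos
assert np.array_equal(old_pos, new_pos)
```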
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_press_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_press_v2.py
index cd8004b53..76c8e1181 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_press_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_press_v2.py
@@ -1,19 +1,24 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerHandlePressEnvV2(SawyerXYZEnv):
- TARGET_RADIUS = 0.02
+ TARGET_RADIUS: float = 0.02
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1.0, 0.5)
obj_low = (-0.1, 0.8, -0.001)
@@ -22,15 +27,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.1, 0.70, 0.08)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.9, 0.0]),
"hand_init_pos": np.array(
(0, 0.6, 0.2),
@@ -41,17 +43,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_handle_press.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -74,43 +77,43 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("handleStart")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return np.zeros(4)
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
qvel[9] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self._get_state_rand_vec()
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = self.obj_init_pos
- self._set_obj_xyz(-0.001)
+ self.model.body("box").pos = self.obj_init_pos
+ self._set_obj_xyz(np.array(-0.001))
self._target_pos = self._get_site_pos("goalPress")
self.maxDist = np.abs(
- self.data.site_xpos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, "handleStart")
- ][-1]
- - self._target_pos[-1]
+ self.data.site("handleStart").xpos[-1] - self._target_pos[-1]
)
self.target_reward = 1000 * self.maxDist + 1000 * 2
self._handle_init_pos = self._get_pos_objects()
return self._get_obs()
- def compute_reward(self, actions, obs):
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
del actions
obj = self._get_pos_objects()
tcp = self.tcp_center
@@ -129,7 +132,7 @@ def compute_reward(self, actions, obs):
)
handle_radius = 0.02
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
tcp_to_obj_init = np.linalg.norm(self._handle_init_pos - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
@@ -141,6 +144,6 @@ def compute_reward(self, actions, obs):
object_grasped = reach
reward = reward_utils.hamacher_product(reach, in_place)
- reward = 1 if target_to_obj <= self.TARGET_RADIUS else reward
+ reward = 1.0 if target_to_obj <= self.TARGET_RADIUS else reward
reward *= 10
return (reward, tcp_to_obj, tcp_opened, target_to_obj, object_grasped, in_place)
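
The `_set_obj_xyz(np.array(-0.001))` calls above look odd next to the old scalar argument, but they behave identically: NumPy accepts a 0-d array wherever a scalar is assigned, and the wrapping only exists to satisfy the new `npt.NDArray[Any]` parameter type. Quick check:

```python
import numpy as np

qpos = np.zeros(12)          # stands in for self.data.qpos.flat.copy()
qpos[9] = np.array(-0.001)   # 0-d array assigned into a scalar slot
assert qpos[9] == -0.001
```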
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_pull_side_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_pull_side_v2.py
index ab663dff4..67f5a013c 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_pull_side_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_pull_side_v2.py
@@ -1,32 +1,34 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerHandlePullSideEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1.0, 0.5)
obj_low = (-0.35, 0.65, 0.0)
obj_high = (-0.25, 0.75, 0.0)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([-0.3, 0.7, 0.0]),
"hand_init_pos": np.array(
(0, 0.6, 0.2),
@@ -40,17 +42,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_handle_press_sideways.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
reward,
@@ -61,6 +64,7 @@ def evaluate_state(self, obs, action):
in_place_reward,
) = self.compute_reward(action, obs)
+ assert self.obj_init_pos is not None
info = {
"success": float(obj_to_target <= 0.08),
"near_object": float(tcp_to_obj <= 0.05),
@@ -76,43 +80,43 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("handleCenter")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return np.zeros(4)
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
qvel[9] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self._get_state_rand_vec()
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = self.obj_init_pos
- self._set_obj_xyz(-0.1)
+ self.model.body("box").pos = self.obj_init_pos
+ self._set_obj_xyz(np.array(-0.1))
self._target_pos = self._get_site_pos("goalPull")
self.maxDist = np.abs(
- self.data.site_xpos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, "handleStart")
- ][-1]
- - self._target_pos[-1]
+ self.data.site("handleStart").xpos[-1] - self._target_pos[-1]
)
self.target_reward = 1000 * self.maxDist + 1000 * 2
self.obj_init_pos = self._get_pos_objects()
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None and self.obj_init_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
obj = obs[4:7]
# Force target to be slightly above basketball hoop
target = self._target_pos.copy()
@@ -144,7 +148,7 @@ def compute_reward(self, action, obs):
# reward = in_place
tcp_opened = obs[3]
- tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
+ tcp_to_obj = float(np.linalg.norm(obj - self.tcp_center))
if (
tcp_to_obj < 0.035
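
Every constructor in this patch is refactored the same way: the individual `render_mode`/`camera_name`/`camera_id` parameters disappear and whatever the caller passes is forwarded to `SawyerXYZEnv` via `**render_kwargs`. One annotation subtlety worth noting: `**kwargs: T` types each *value* as `T`, so `**render_kwargs: dict[str, Any] | None` declares every forwarded value to be a dict or None, whereas plain `Any` describes what actually flows through. A generic sketch of the forwarding (both classes are illustrative):

```python
from __future__ import annotations

from typing import Any


class ParentEnv:
    def __init__(
        self, render_mode: str | None = None, camera_name: str | None = None
    ) -> None:
        self.render_mode = render_mode
        self.camera_name = camera_name


class ChildEnv(ParentEnv):
    def __init__(self, **render_kwargs: Any) -> None:
        # Each keyword lands unchanged in the parent constructor.
        super().__init__(**render_kwargs)


env = ChildEnv(render_mode="rgb_array", camera_name="corner")
assert env.render_mode == "rgb_array" and env.camera_name == "corner"
```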
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_pull_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_pull_v2.py
index 622eba505..8839b0ef2 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_pull_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_handle_pull_v2.py
@@ -1,17 +1,22 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerHandlePullEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1.0, 0.5)
obj_low = (-0.1, 0.8, -0.001)
@@ -20,15 +25,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.1, 0.70, 0.18)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.9, 0.0]),
"hand_init_pos": np.array(
(0, 0.6, 0.2),
@@ -39,17 +41,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_handle_press.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
reward,
@@ -60,6 +63,7 @@ def evaluate_state(self, obs, action):
in_place_reward,
) = self.compute_reward(action, obs)
+ assert self.obj_init_pos is not None
info = {
"success": float(obj_to_target <= self.TARGET_RADIUS),
"near_object": float(tcp_to_obj <= 0.05),
@@ -75,35 +79,38 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
return []
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("handleRight")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return np.zeros(4)
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9] = pos
qvel[9] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self._get_state_rand_vec()
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = self.obj_init_pos
- self._set_obj_xyz(-0.1)
+ self.model.body("box").pos = self.obj_init_pos
+ self._set_obj_xyz(np.array(-0.1))
self._target_pos = self._get_site_pos("goalPull")
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self.obj_init_pos is not None and self._target_pos is not None
+ ), "`reset_model()` should be called before `compute_reward()`"
+ ), "`reset_model()` must be called before `compute_reward()`."
obj = obs[4:7]
# Force target to be slightly above basketball hoop
target = self._target_pos.copy()
@@ -130,7 +137,7 @@ def compute_reward(self, action, obs):
reward = reward_utils.hamacher_product(object_grasped, in_place)
tcp_opened = obs[3]
- tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
+ tcp_to_obj = float(np.linalg.norm(obj - self.tcp_center))
if (
tcp_to_obj < 0.035
and tcp_opened > 0
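
The `dtype=np.float64` pinned onto every `Box` in these files is not cosmetic: Gymnasium's `Box` defaults to `float32` and silently casts the bounds, so the space's dtype would disagree with the `float64` observations these envs emit. A standalone illustration (the exact `contains` behavior across dtypes may vary by Gymnasium version):

```python
import numpy as np
from gymnasium.spaces import Box

low = np.array([-0.1, 0.7, 0.0])
high = np.array([0.1, 0.8, 0.0])

default_box = Box(low, high)                   # dtype defaults to float32
pinned_box = Box(low, high, dtype=np.float64)  # matches the float64 observations

assert default_box.dtype == np.float32
assert pinned_box.dtype == np.float64

obs = np.array([0.0, 0.75, 0.0], dtype=np.float64)
print(default_box.contains(obs), pinned_box.contains(obs))  # typically: False True
```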
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_lever_pull_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_lever_pull_v2.py
index b4c385e81..6ae10a525 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_lever_pull_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_lever_pull_v2.py
@@ -1,14 +1,17 @@
+from __future__ import annotations
+
+from typing import Any
+
import mujoco
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerLeverPullEnvV2(SawyerXYZEnv):
@@ -27,22 +30,22 @@ class SawyerLeverPullEnvV2(SawyerXYZEnv):
LEVER_RADIUS = 0.2
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, -0.15)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.7, 0.0)
obj_high = (0.1, 0.8, 0.0)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.7, 0.0]),
"hand_init_pos": np.array([0, 0.4, 0.2], dtype=np.float32),
}
@@ -55,17 +58,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_lever_pull.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
shoulder_to_lever,
@@ -86,17 +90,17 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("objGeom")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("objGeom")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("leverStart")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("objGeom").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self._get_state_rand_vec()
self.model.body_pos[
@@ -108,10 +112,13 @@ def reset_model(self):
self._target_pos = self.obj_init_pos + np.array(
[0.12, 0.0, 0.25 + self.LEVER_RADIUS]
)
- mujoco.mj_forward(self.model, self.data)
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float]:
+ assert self._lever_pos_init is not None
gripper = obs[:3]
lever = obs[4:7]
@@ -129,7 +136,7 @@ def compute_reward(self, action, obs):
# end in itself. Make sure to devalue it compared to the value of
# actually lifting the lever
ready_to_lift = reward_utils.tolerance(
- np.linalg.norm(shoulder_to_lever),
+ float(np.linalg.norm(shoulder_to_lever)),
bounds=(0, 0.02),
margin=np.linalg.norm(shoulder_to_lever_init),
sigmoid="long_tail",
@@ -138,7 +145,7 @@ def compute_reward(self, action, obs):
# The skill of the agent should be measured by its ability to get the
# lever to point straight upward. This means we'll be measuring the
# current angle of the lever's joint, and comparing with 90deg.
- lever_angle = -self.data.joint("LeverAxis").qpos
+ lever_angle = float(-self.data.joint("LeverAxis").qpos.item())
lever_angle_desired = np.pi / 2.0
lever_error = abs(lever_angle - lever_angle_desired)
@@ -154,8 +161,8 @@ def compute_reward(self, action, obs):
)
target = self._target_pos
- obj_to_target = np.linalg.norm(lever - target)
- in_place_margin = np.linalg.norm(self._lever_pos_init - target)
+ obj_to_target = float(np.linalg.norm(lever - target))
+ in_place_margin = float(np.linalg.norm(self._lever_pos_init - target))
in_place = reward_utils.tolerance(
obj_to_target,
@@ -168,7 +175,7 @@ def compute_reward(self, action, obs):
reward = 10.0 * reward_utils.hamacher_product(ready_to_lift, in_place)
return (
reward,
- np.linalg.norm(shoulder_to_lever),
+ float(np.linalg.norm(shoulder_to_lever)),
ready_to_lift,
lever_error,
lever_engagement,
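
The `float(np.linalg.norm(...))` wrappers that recur through this file are purely for the type checker: at runtime `norm` already returns an `np.float64` (which subclasses `float`), but the NumPy stubs declare the return type as `floating[Any]`, which mypy refuses wherever the new signatures promise a plain `float`. The cast is free:

```python
import numpy as np

d = np.linalg.norm(np.array([3.0, 4.0]))
assert isinstance(d, np.floating)  # np.float64 at runtime

r: float = float(d)  # satisfies a `-> float` / `tuple[float, ...]` annotation
assert r == 5.0
```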
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_peg_insertion_side_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_peg_insertion_side_v2.py
index 4bf4a41da..ad40fdd01 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_peg_insertion_side_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_peg_insertion_side_v2.py
@@ -1,18 +1,20 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPegInsertionSideEnvV2(SawyerXYZEnv):
- TARGET_RADIUS = 0.07
+ TARGET_RADIUS: float = 0.07
"""
Motivation for V2:
V1 was difficult to solve because the observation didn't say where
@@ -30,7 +32,10 @@ class SawyerPegInsertionSideEnvV2(SawyerXYZEnv):
the hole's position, as opposed to hand_low and hand_high
"""
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_init_pos = (0, 0.6, 0.2)
hand_low = (-0.5, 0.40, 0.05)
@@ -41,15 +46,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (-0.25, 0.7, 0.001)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.6, 0.02]),
"hand_init_pos": np.array([0, 0.6, 0.2]),
}
@@ -64,18 +66,22 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
self.goal_space = Box(
np.array(goal_low) + np.array([0.03, 0.0, 0.13]),
np.array(goal_high) + np.array([0.03, 0.0, 0.13]),
+ dtype=np.float64,
)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_peg_insertion_side.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
@@ -88,6 +94,7 @@ def evaluate_state(self, obs, action):
collision_box_front,
ip_orig,
) = self.compute_reward(action, obs)
+ assert self.obj_init_pos is not None
grasp_success = float(
tcp_to_obj < 0.02
and (tcp_open > 0)
@@ -108,14 +115,14 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("pegGrasp")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.site("pegGrasp").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
pos_peg, pos_box = np.split(self._get_state_rand_vec(), 2)
while np.linalg.norm(pos_peg[:2] - pos_box[:2]) < 0.1:
@@ -123,24 +130,28 @@ def reset_model(self):
self.obj_init_pos = pos_peg
self.peg_head_pos_init = self._get_site_pos("pegHead")
self._set_obj_xyz(self.obj_init_pos)
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = pos_box
+ self.model.body("box").pos = pos_box
self._target_pos = pos_box + np.array([0.03, 0.0, 0.13])
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
tcp = self.tcp_center
obj = obs[4:7]
obj_head = self._get_site_pos("pegHead")
- tcp_opened = obs[3]
+ tcp_opened: float = obs[3]
target = self._target_pos
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
scale = np.array([1.0, 2.0, 2.0])
# force agent to pick up object then insert
- obj_to_target = np.linalg.norm((obj_head - target) * scale)
+ obj_to_target = float(np.linalg.norm((obj_head - target) * scale))
- in_place_margin = np.linalg.norm((self.peg_head_pos_init - target) * scale)
+ in_place_margin = float(
+ np.linalg.norm((self.peg_head_pos_init - target) * scale)
+ )
in_place = reward_utils.tolerance(
obj_to_target,
bounds=(0, self.TARGET_RADIUS),
@@ -199,7 +210,7 @@ def compute_reward(self, action, obs):
if obj_to_target <= 0.07:
reward = 10.0
- return [
+ return (
reward,
tcp_to_obj,
tcp_opened,
@@ -208,4 +219,4 @@ def compute_reward(self, action, obs):
in_place,
collision_boxes,
ip_orig,
- ]
+ )
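
The closing list-to-tuple change is what makes the new `tuple[float, ...]` return annotations checkable: a list is unsized and mutable in mypy's eyes, so `return [reward, ...]` cannot satisfy a fixed-arity tuple type, while the parenthesized form can; call sites that unpack the result are unaffected. Reduced to its essence:

```python
def compute_reward_sketch() -> tuple[float, float, float]:
    reward, tcp_to_obj, tcp_opened = 10.0, 0.01, 1.0
    # A list literal here would fail mypy against the fixed-arity tuple type.
    return (reward, tcp_to_obj, tcp_opened)


r, d, o = compute_reward_sketch()  # unpacking works exactly as before
```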
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_peg_unplug_side_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_peg_unplug_side_v2.py
index 23cea6a83..20bacc803 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_peg_unplug_side_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_peg_unplug_side_v2.py
@@ -1,17 +1,22 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPegUnplugSideEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.25, 0.6, -0.001)
@@ -20,15 +25,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = obj_high + np.array([0.194, 0.0, 0.131])
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([-0.225, 0.6, 0.05]),
"hand_init_pos": np.array((0, 0.6, 0.2)),
}
@@ -37,17 +39,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.hand_init_pos = self.init_config["hand_init_pos"]
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_peg_unplug_side.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
# obj = obs[4:7]
(
@@ -74,13 +77,13 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("pegEnd")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("plug1").xquat
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos
@@ -88,28 +91,29 @@ def _set_obj_xyz(self, pos):
qvel[9:12] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
pos_box = self._get_state_rand_vec()
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "box")
- ] = pos_box
+ self.model.body("box").pos = pos_box
pos_plug = pos_box + np.array([0.044, 0.0, 0.131])
self._set_obj_xyz(pos_plug)
self.obj_init_pos = self._get_site_pos("pegEnd")
self._target_pos = pos_plug + np.array([0.15, 0.0, 0.0])
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
tcp = self.tcp_center
obj = obs[4:7]
- tcp_opened = obs[3]
+ tcp_opened: float = obs[3]
target = self._target_pos
- tcp_to_obj = np.linalg.norm(obj - tcp)
- obj_to_target = np.linalg.norm(obj - target)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
+ obj_to_target = float(np.linalg.norm(obj - target))
pad_success_margin = 0.05
object_reach_radius = 0.01
x_z_margin = 0.005
@@ -125,7 +129,7 @@ def compute_reward(self, action, obs):
desired_gripper_effort=0.8,
high_density=True,
)
- in_place_margin = np.linalg.norm(self.obj_init_pos - target)
+ in_place_margin = float(np.linalg.norm(self.obj_init_pos - target))
in_place = reward_utils.tolerance(
obj_to_target,
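
A recurring addition in these `reset_model()` bodies is `self.model.site("goal").pos = self._target_pos`. That line mutates the *model*; the world-frame `xpos` stored in `MjData` only refreshes on the next forward pass, which the env's own step/reset cycle performs anyway. A tiny demonstration with a throwaway model:

```python
import mujoco

model = mujoco.MjModel.from_xml_string(
    "<mujoco><worldbody><site name='goal' pos='0 0 0'/></worldbody></mujoco>"
)
data = mujoco.MjData(model)

model.site("goal").pos = [0.15, 0.0, 0.0]  # edits the model only
mujoco.mj_forward(model, data)             # propagates into the data
print(data.site("goal").xpos)              # -> [0.15 0.   0.  ]
```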
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_out_of_hole_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_out_of_hole_v2.py
index 209c9e77b..e0d54c9e8 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_out_of_hole_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_out_of_hole_v2.py
@@ -1,18 +1,24 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPickOutOfHoleEnvV2(SawyerXYZEnv):
- _TARGET_RADIUS = 0.02
+ _TARGET_RADIUS: float = 0.02
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, -0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (0, 0.75, 0.02)
@@ -21,15 +27,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.1, 0.6, 0.3)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.6, 0.0]),
"obj_init_angle": 0.3,
"hand_init_pos": np.array([0.0, 0.6, 0.2]),
@@ -42,15 +45,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_pick_out_of_hole.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -77,23 +83,22 @@ def evaluate_state(self, obs, action):
return reward, info
@property
- def _target_site_config(self):
- l = [("goal", self.init_right_pad)]
+ def _target_site_config(self) -> list[tuple[str, npt.NDArray[Any]]]:
+ _site_config = [("goal", self.init_right_pad)]
if self.obj_init_pos is not None:
- l[0] = ("goal", self.obj_init_pos)
- return l
+ _site_config[0] = ("goal", self.obj_init_pos)
+ return _site_config
- @property
- def _get_id_main_object(self):
- return self.unwrapped.model.geom_name2id("objGeom")
+ def _get_id_main_object(self) -> int:
+ return self.model.geom_name2id("objGeom")
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("obj").xquat
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
pos_obj, pos_goal = np.split(self._get_state_rand_vec(), 2)
@@ -103,20 +108,23 @@ def reset_model(self):
self.obj_init_pos = pos_obj
self._set_obj_xyz(self.obj_init_pos)
self._target_pos = pos_goal
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
obj = obs[4:7]
gripper = self.tcp_center
- obj_to_target = np.linalg.norm(obj - self._target_pos)
- tcp_to_obj = np.linalg.norm(obj - gripper)
- in_place_margin = np.linalg.norm(self.obj_init_pos - self._target_pos)
+ obj_to_target = float(np.linalg.norm(obj - self._target_pos))
+ tcp_to_obj = float(np.linalg.norm(obj - gripper))
+ in_place_margin = float(np.linalg.norm(self.obj_init_pos - self._target_pos))
threshold = 0.03
# floor is a 3D funnel centered on the initial object pos
- radius = np.linalg.norm(gripper[:2] - self.obj_init_pos[:2])
+ radius = float(np.linalg.norm(gripper[:2] - self.obj_init_pos[:2]))
if radius <= threshold:
floor = 0.0
else:
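
Dropping the stray `@property` on `_get_id_main_object` is more than a typing tweak: with the decorator in place, the attribute access already yields the `int`, so any call site written as `env._get_id_main_object()` raises `TypeError`. A minimal reproduction of the failure mode:

```python
class WithProperty:
    @property
    def _get_id_main_object(self) -> int:
        return 7


class AsMethod:
    def _get_id_main_object(self) -> int:
        return 7


assert AsMethod()._get_id_main_object() == 7

try:
    WithProperty()._get_id_main_object()  # the property already returned 7
except TypeError as err:
    print(err)  # 'int' object is not callable
```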
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_place_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_place_v2.py
index 304082791..cdd8412b0 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_place_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_place_v2.py
@@ -1,13 +1,16 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPickPlaceEnvV2(SawyerXYZEnv):
@@ -25,7 +28,10 @@ class SawyerPickPlaceEnvV2(SawyerXYZEnv):
- (6/15/20) Separated reach-push-pick-place into 3 separate envs.
"""
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.1, 0.8, 0.05)
goal_high = (0.1, 0.9, 0.3)
hand_low = (-0.5, 0.40, 0.05)
@@ -34,15 +40,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.1, 0.7, 0.02)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0, 0.6, 0.02]),
"hand_init_pos": np.array([0, 0.6, 0.2]),
@@ -57,18 +60,21 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.num_resets = 0
self.obj_init_pos = None
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_pick_place_v2.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
@@ -81,6 +87,7 @@ def evaluate_state(self, obs, action):
) = self.compute_reward(action, obs)
success = float(obj_to_target <= 0.07)
near_object = float(tcp_to_obj <= 0.03)
+ assert self.obj_init_pos is not None
grasp_success = float(
self.touching_main_object
and (tcp_open > 0)
@@ -98,19 +105,18 @@ def evaluate_state(self, obs, action):
return reward, info
- @property
- def _get_id_main_object(self):
+ def _get_id_main_object(self) -> int:
return self.data.geom("objGeom").id
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return Rotation.from_matrix(
self.data.geom("objGeom").xmat.reshape(3, 3)
).as_quat()
- def fix_extreme_obj_pos(self, orig_init_pos):
+ def fix_extreme_obj_pos(self, orig_init_pos: npt.NDArray[Any]) -> npt.NDArray[Any]:
# This is to account for meshes for the geom and object are not
# aligned. If this is not done, the object could be initialized in an
# extreme position
@@ -118,9 +124,11 @@ def fix_extreme_obj_pos(self, orig_init_pos):
adjusted_pos = orig_init_pos[:2] + diff
# The convention we follow is that body_com[2] is always 0,
# and geom_pos[2] is the object height
- return [adjusted_pos[0], adjusted_pos[1], self.get_body_com("obj")[-1]]
+ return np.array(
+ [adjusted_pos[0], adjusted_pos[1], self.get_body_com("obj")[-1]]
+ )
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.fix_extreme_obj_pos(self.init_config["obj_init_pos"])
@@ -138,23 +146,34 @@ def reset_model(self):
self.init_right_pad = self.get_body_com("rightpad")
self._set_obj_xyz(self.obj_init_pos)
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def _gripper_caging_reward(self, action, obj_position):
+ def _gripper_caging_reward(
+ self,
+ action: npt.NDArray[np.float32],
+ obj_pos: npt.NDArray[Any],
+ obj_radius: float = 0, # All of these args are unused, just here to match
+ pad_success_thresh: float = 0, # the parent's type signature
+ object_reach_radius: float = 0,
+ xz_thresh: float = 0,
+ desired_gripper_effort: float = 1.0,
+ high_density: bool = False,
+ medium_density: bool = False,
+ ) -> float:
pad_success_margin = 0.05
x_z_success_margin = 0.005
obj_radius = 0.015
tcp = self.tcp_center
left_pad = self.get_body_com("leftpad")
right_pad = self.get_body_com("rightpad")
- delta_object_y_left_pad = left_pad[1] - obj_position[1]
- delta_object_y_right_pad = obj_position[1] - right_pad[1]
+ delta_object_y_left_pad = left_pad[1] - obj_pos[1]
+ delta_object_y_right_pad = obj_pos[1] - right_pad[1]
right_caging_margin = abs(
- abs(obj_position[1] - self.init_right_pad[1]) - pad_success_margin
+ abs(obj_pos[1] - self.init_right_pad[1]) - pad_success_margin
)
left_caging_margin = abs(
- abs(obj_position[1] - self.init_left_pad[1]) - pad_success_margin
+ abs(obj_pos[1] - self.init_left_pad[1]) - pad_success_margin
)
right_caging = reward_utils.tolerance(
@@ -174,12 +193,11 @@ def _gripper_caging_reward(self, action, obj_position):
# compute the tcp_obj distance in the x_z plane
tcp_xz = tcp + np.array([0.0, -tcp[1], 0.0])
- obj_position_x_z = np.copy(obj_position) + np.array(
- [0.0, -obj_position[1], 0.0]
- )
- tcp_obj_norm_x_z = np.linalg.norm(tcp_xz - obj_position_x_z, ord=2)
+ obj_position_x_z = np.copy(obj_pos) + np.array([0.0, -obj_pos[1], 0.0])
+ tcp_obj_norm_x_z = float(np.linalg.norm(tcp_xz - obj_position_x_z, ord=2))
# used for computing the tcp to object object margin in the x_z plane
+ assert self.obj_init_pos is not None
init_obj_x_z = self.obj_init_pos + np.array([0.0, -self.obj_init_pos[1], 0.0])
init_tcp_x_z = self.init_tcp + np.array([0.0, -self.init_tcp[1], 0.0])
tcp_obj_x_z_margin = (
@@ -201,15 +219,18 @@ def _gripper_caging_reward(self, action, obj_position):
caging_and_gripping = (caging_and_gripping + caging) / 2
return caging_and_gripping
- def compute_reward(self, action, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
tcp_opened = obs[3]
target = self._target_pos
- obj_to_target = np.linalg.norm(obj - target)
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ obj_to_target = float(np.linalg.norm(obj - target))
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
in_place_margin = np.linalg.norm(self.obj_init_pos - target)
in_place = reward_utils.tolerance(
@@ -233,4 +254,4 @@ def compute_reward(self, action, obs):
reward += 1.0 + 5.0 * in_place
if obj_to_target < _TARGET_RADIUS:
reward = 10.0
- return [reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place]
+ return (reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place)
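
The unused parameters grafted onto `_gripper_caging_reward` exist for the Liskov rule mypy enforces: an override must accept at least everything its parent accepts, even when the subclass hard-codes its own margins. A generic sketch of the constraint (the base class here is illustrative, not `SawyerXYZEnv`):

```python
import numpy as np
import numpy.typing as npt


class Base:
    def _gripper_caging_reward(
        self,
        action: npt.NDArray[np.float32],
        obj_pos: npt.NDArray[np.float64],
        obj_radius: float = 0.0,
        high_density: bool = False,
    ) -> float:
        return 0.0


class Override(Base):
    def _gripper_caging_reward(
        self,
        action: npt.NDArray[np.float32],
        obj_pos: npt.NDArray[np.float64],
        obj_radius: float = 0.0,     # unused; kept so the signature matches
        high_density: bool = False,  # unused; kept so the signature matches
    ) -> float:
        obj_radius = 0.015  # the subclass hard-codes its own value
        return obj_radius
```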
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_place_wall_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_place_wall_v2.py
index 654fee547..a1740d04d 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_place_wall_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_pick_place_wall_v2.py
@@ -1,13 +1,16 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPickPlaceWallEnvV2(SawyerXYZEnv):
@@ -26,7 +29,10 @@ class SawyerPickPlaceWallEnvV2(SawyerXYZEnv):
reach-push-pick-place-wall.
"""
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.05, 0.85, 0.05)
goal_high = (0.05, 0.9, 0.3)
hand_low = (-0.5, 0.40, 0.05)
@@ -35,15 +41,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.05, 0.65, 0.015)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0, 0.6, 0.02]),
"hand_init_pos": np.array([0, 0.6, 0.2]),
@@ -58,17 +61,20 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.num_resets = 0
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_pick_place_wall_v2.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
reward,
@@ -81,6 +87,7 @@ def evaluate_state(self, obs, action):
success = float(obj_to_target <= 0.07)
near_object = float(tcp_to_obj <= 0.03)
+ assert self.obj_init_pos is not None
grasp_success = float(
self.touching_main_object
and (tcp_open > 0)
@@ -98,10 +105,10 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.data.geom("objGeom").xpos
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return Rotation.from_matrix(
self.data.geom("objGeom").xmat.reshape(3, 3)
).as_quat()
@@ -115,7 +122,7 @@ def adjust_initObjPos(self, orig_init_pos):
# The convention we follow is that body_com[2] is always 0, and geom_pos[2] is the object height
return [adjustedPos[0], adjustedPos[1], self.data.geom("objGeom").xpos[-1]]
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.adjust_initObjPos(self.init_config["obj_init_pos"])
@@ -130,27 +137,32 @@ def reset_model(self):
self.obj_init_pos = goal_pos[:3]
self._set_obj_xyz(self.obj_init_pos)
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None and self.obj_init_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
- tcp_opened = obs[3]
+ tcp_opened: float = obs[3]
midpoint = np.array([self._target_pos[0], 0.77, 0.25])
target = self._target_pos
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
in_place_scaling = np.array([1.0, 1.0, 3.0])
- obj_to_midpoint = np.linalg.norm((obj - midpoint) * in_place_scaling)
- obj_to_midpoint_init = np.linalg.norm(
- (self.obj_init_pos - midpoint) * in_place_scaling
+ obj_to_midpoint = float(np.linalg.norm((obj - midpoint) * in_place_scaling))
+ obj_to_midpoint_init = float(
+ np.linalg.norm((self.obj_init_pos - midpoint) * in_place_scaling)
)
- obj_to_target = np.linalg.norm(obj - target)
- obj_to_target_init = np.linalg.norm(self.obj_init_pos - target)
+ obj_to_target = float(np.linalg.norm(obj - target))
+ obj_to_target_init = float(np.linalg.norm(self.obj_init_pos - target))
in_place_part1 = reward_utils.tolerance(
obj_to_midpoint,
@@ -193,11 +205,11 @@ def compute_reward(self, action, obs):
if obj_to_target < _TARGET_RADIUS:
reward = 10.0
- return [
+ return (
reward,
tcp_to_obj,
tcp_opened,
- np.linalg.norm(obj - target),
+ float(np.linalg.norm(obj - target)),
object_grasped,
in_place_part2,
- ]
+ )
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_back_side_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_back_side_v2.py
index 0d83a526c..48947c6bc 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_back_side_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_back_side_v2.py
@@ -1,14 +1,16 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPlateSlideBackSideEnvV2(SawyerXYZEnv):
@@ -27,7 +29,10 @@ class SawyerPlateSlideBackSideEnvV2(SawyerXYZEnv):
- (6/22/20) Cabinet now sits on ground, instead of .02 units above it
"""
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.05, 0.6, 0.015)
goal_high = (0.15, 0.6, 0.015)
hand_low = (-0.5, 0.40, 0.05)
@@ -36,15 +41,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (-0.25, 0.6, 0.0)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([-0.25, 0.6, 0.02], dtype=np.float32),
"hand_init_pos": np.array((0, 0.6, 0.2), dtype=np.float32),
@@ -57,15 +59,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_plate_slide_sideway.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -89,10 +94,10 @@ def evaluate_state(self, obs, action):
}
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.data.geom("puck").xpos
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("puck").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
@@ -103,13 +108,13 @@ def _get_obs_dict(self):
state_achieved_goal=self._get_pos_objects(),
)
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:11] = pos
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self.init_config["obj_init_pos"]
@@ -118,22 +123,27 @@ def reset_model(self):
rand_vec = self._get_state_rand_vec()
self.obj_init_pos = rand_vec[:3]
self._target_pos = rand_vec[3:]
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "puck_goal")
- ] = self.obj_init_pos
+ self.model.body("puck_goal").pos = self.obj_init_pos
self._set_obj_xyz(np.array([-0.15, 0.0]))
+ self.model.site("goal").pos = self._target_pos
+
return self._get_obs()
- def compute_reward(self, actions, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert (
+ self._target_pos is not None and self.obj_init_pos is not None
+ ), "`reset_model()` must be called before `compute_reward()`."
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
- tcp_opened = obs[3]
+ tcp_opened: float = obs[3]
target = self._target_pos
- obj_to_target = np.linalg.norm(obj - target)
- in_place_margin = np.linalg.norm(self.obj_init_pos - target)
+ obj_to_target = float(np.linalg.norm(obj - target))
+ in_place_margin = float(np.linalg.norm(self.obj_init_pos - target))
in_place = reward_utils.tolerance(
obj_to_target,
bounds=(0, _TARGET_RADIUS),
@@ -141,8 +151,8 @@ def compute_reward(self, actions, obs):
sigmoid="long_tail",
)
- tcp_to_obj = np.linalg.norm(tcp - obj)
- obj_grasped_margin = np.linalg.norm(self.init_tcp - self.obj_init_pos)
+ tcp_to_obj = float(np.linalg.norm(tcp - obj))
+ obj_grasped_margin = float(np.linalg.norm(self.init_tcp - self.obj_init_pos))
object_grasped = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, _TARGET_RADIUS),
@@ -157,4 +167,4 @@ def compute_reward(self, actions, obs):
if obj_to_target < _TARGET_RADIUS:
reward = 10.0
- return [reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place]
+ return (reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_back_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_back_v2.py
index b0e493f88..50867670c 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_back_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_back_v2.py
@@ -1,17 +1,23 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPlateSlideBackEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.1, 0.6, 0.015)
goal_high = (0.1, 0.6, 0.015)
hand_low = (-0.5, 0.40, 0.05)
@@ -20,15 +26,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.0, 0.85, 0.0)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0.0, 0.85, 0.0], dtype=np.float32),
"hand_init_pos": np.array((0, 0.6, 0.2), dtype=np.float32),
@@ -41,15 +44,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_plate_slide.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -73,20 +79,20 @@ def evaluate_state(self, obs, action):
}
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.data.geom("puck").xpos
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("puck").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:11] = pos
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self.init_config["obj_init_pos"]
@@ -98,17 +104,22 @@ def reset_model(self):
self.data.body("puck_goal").xpos = self._target_pos
self._set_obj_xyz(np.array([0, 0.15]))
+ self.model.site("goal").pos = self._target_pos
+
return self._get_obs()
- def compute_reward(self, actions, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
tcp_opened = obs[3]
target = self._target_pos
- obj_to_target = np.linalg.norm(obj - target)
- in_place_margin = np.linalg.norm(self.obj_init_pos - target)
+ obj_to_target = float(np.linalg.norm(obj - target))
+ in_place_margin = float(np.linalg.norm(self.obj_init_pos - target))
in_place = reward_utils.tolerance(
obj_to_target,
bounds=(0, _TARGET_RADIUS),
@@ -116,8 +127,8 @@ def compute_reward(self, actions, obs):
sigmoid="long_tail",
)
- tcp_to_obj = np.linalg.norm(tcp - obj)
- obj_grasped_margin = np.linalg.norm(self.init_tcp - self.obj_init_pos)
+ tcp_to_obj = float(np.linalg.norm(tcp - obj))
+ obj_grasped_margin = float(np.linalg.norm(self.init_tcp - self.obj_init_pos))
object_grasped = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, _TARGET_RADIUS),
@@ -128,8 +139,8 @@ def compute_reward(self, actions, obs):
reward = 1.5 * object_grasped
if tcp[2] <= 0.03 and tcp_to_obj < 0.07:
- reward = 2 + (7 * in_place)
+ reward = 2.0 + (7.0 * in_place)
if obj_to_target < _TARGET_RADIUS:
reward = 10.0
- return [reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place]
+ return (reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place)
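The constructor rewrite above also repeats throughout: instead of each env naming `render_mode` / `camera_name` / `camera_id` and threading them to `super().__init__`, everything render-related is forwarded as `**render_kwargs`. A hedged sketch of the pattern, with hypothetical class names:

```python
from __future__ import annotations

from typing import Any


class BaseEnvSketch:
    def __init__(
        self,
        hand_low: tuple[float, ...],
        hand_high: tuple[float, ...],
        render_mode: str | None = None,
        camera_name: str | None = None,
        camera_id: int | None = None,
    ) -> None:
        self.render_mode = render_mode
        self.camera_name = camera_name
        self.camera_id = camera_id


class PlateSlideSketch(BaseEnvSketch):
    def __init__(self, **render_kwargs: Any) -> None:
        # New base-class render options need no change here: they pass
        # straight through the kwargs.
        super().__init__(
            hand_low=(-0.5, 0.40, 0.05),
            hand_high=(0.5, 1.0, 0.5),
            **render_kwargs,
        )


env = PlateSlideSketch(render_mode="rgb_array", camera_name="corner")
```

Two trade-offs worth noting: the subclass signature no longer documents the accepted options, and a `**kwargs` annotation types each forwarded *value*, so `**render_kwargs: dict[str, Any] | None` as written claims every option is a dict or None; plain `Any` would be the conventional annotation here.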
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_side_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_side_v2.py
index 8ddffcebd..310191223 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_side_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_side_v2.py
@@ -1,17 +1,23 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPlateSlideSideEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.3, 0.54, 0.0)
goal_high = (-0.25, 0.66, 0.0)
hand_low = (-0.5, 0.40, 0.05)
@@ -20,15 +26,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.0, 0.6, 0.0)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0.0, 0.6, 0.0], dtype=np.float32),
"hand_init_pos": np.array((0, 0.6, 0.2), dtype=np.float32),
@@ -41,15 +44,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_plate_slide_sideway.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -73,20 +79,20 @@ def evaluate_state(self, obs, action):
}
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.data.geom("puck").xpos
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("puck").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:11] = pos
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self.init_config["obj_init_pos"]
@@ -98,17 +104,22 @@ def reset_model(self):
self.data.body("puck_goal").xpos = self._target_pos
self._set_obj_xyz(np.zeros(2))
+ self.model.site("goal").pos = self._target_pos
+
return self._get_obs()
- def compute_reward(self, actions, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
tcp_opened = obs[3]
target = self._target_pos
- obj_to_target = np.linalg.norm(obj - target)
- in_place_margin = np.linalg.norm(self.obj_init_pos - target)
+ obj_to_target = float(np.linalg.norm(obj - target))
+ in_place_margin = float(np.linalg.norm(self.obj_init_pos - target))
in_place = reward_utils.tolerance(
obj_to_target,
bounds=(0, _TARGET_RADIUS),
@@ -116,8 +127,8 @@ def compute_reward(self, actions, obs):
sigmoid="long_tail",
)
- tcp_to_obj = np.linalg.norm(tcp - obj)
- obj_grasped_margin = np.linalg.norm(self.init_tcp - self.obj_init_pos)
+ tcp_to_obj = float(np.linalg.norm(tcp - obj))
+ obj_grasped_margin = float(np.linalg.norm(self.init_tcp - self.obj_init_pos))
object_grasped = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, _TARGET_RADIUS),
@@ -131,8 +142,8 @@ def compute_reward(self, actions, obs):
reward = 1.5 * object_grasped
if tcp[2] <= 0.03 and tcp_to_obj < 0.07:
- reward = 2 + (7 * in_place)
+ reward = 2.0 + (7.0 * in_place)
if obj_to_target < _TARGET_RADIUS:
reward = 10.0
- return [reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place]
+ return (reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place)
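Another blanket change: every distance from `np.linalg.norm` is now wrapped in `float(...)`. The function returns a NumPy scalar (`np.floating`), which mypy does not accept where a declared `float` is expected; the wrap also puts plain Python numbers in the returned tuple. A quick demonstration:

```python
import numpy as np

d = np.linalg.norm(np.array([3.0, 4.0]))
print(type(d))         # <class 'numpy.float64'>, a NumPy scalar
dist: float = float(d)  # narrows to the builtin float the annotations declare
print(type(dist))      # <class 'float'>
print(dist)            # 5.0
```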
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_v2.py
index 72f15822d..2370d4a9d 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_plate_slide_v2.py
@@ -1,19 +1,25 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPlateSlideEnvV2(SawyerXYZEnv):
- OBJ_RADIUS = 0.04
+ OBJ_RADIUS: float = 0.04
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.1, 0.85, 0.0)
goal_high = (0.1, 0.9, 0.0)
hand_low = (-0.5, 0.40, 0.05)
@@ -22,15 +28,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.0, 0.6, 0.0)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0.0, 0.6, 0.0], dtype=np.float32),
"hand_init_pos": np.array((0, 0.6, 0.2), dtype=np.float32),
@@ -43,15 +46,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_plate_slide.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -75,20 +81,20 @@ def evaluate_state(self, obs, action):
}
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.data.geom("puck").xpos
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("puck").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:11] = pos
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self.init_config["obj_init_pos"]
@@ -102,17 +108,22 @@ def reset_model(self):
self.model.body("puck_goal").pos = self._target_pos
self._set_obj_xyz(np.zeros(2))
+ self.model.site("goal").pos = self._target_pos
+
return self._get_obs()
- def compute_reward(self, action, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
tcp_opened = obs[3]
target = self._target_pos
- obj_to_target = np.linalg.norm(obj - target)
- in_place_margin = np.linalg.norm(self.obj_init_pos - target)
+ obj_to_target = float(np.linalg.norm(obj - target))
+ in_place_margin = float(np.linalg.norm(self.obj_init_pos - target))
in_place = reward_utils.tolerance(
obj_to_target,
@@ -121,8 +132,8 @@ def compute_reward(self, action, obs):
sigmoid="long_tail",
)
- tcp_to_obj = np.linalg.norm(tcp - obj)
- obj_grasped_margin = np.linalg.norm(self.init_tcp - self.obj_init_pos)
+ tcp_to_obj = float(np.linalg.norm(tcp - obj))
+ obj_grasped_margin = float(np.linalg.norm(self.init_tcp - self.obj_init_pos))
object_grasped = reward_utils.tolerance(
tcp_to_obj,
@@ -138,4 +149,4 @@ def compute_reward(self, action, obs):
if obj_to_target < _TARGET_RADIUS:
reward = 10.0
- return [reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place]
+ return (reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place)
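The `dtype=np.float64` now passed to every `Box` deserves a note: gymnasium's `Box` defaults to `float32` and casts `low`/`high` to that dtype, so without the argument the declared spaces disagree with the float64 observations and goals these envs produce. A minimal check:

```python
import numpy as np
from gymnasium.spaces import Box

goal_low = np.array([-0.1, 0.85, 0.0])
goal_high = np.array([0.1, 0.9, 0.0])

default_space = Box(goal_low, goal_high)  # gymnasium default dtype: float32
pinned_space = Box(goal_low, goal_high, dtype=np.float64)

print(default_space.dtype)  # float32, low/high were silently downcast
print(pinned_space.dtype)   # float64, matches the env's observations
```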
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_back_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_back_v2.py
index 12635247e..086e19b8a 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_back_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_back_v2.py
@@ -1,20 +1,26 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPushBackEnvV2(SawyerXYZEnv):
- OBJ_RADIUS = 0.007
- TARGET_RADIUS = 0.05
+ OBJ_RADIUS: float = 0.007
+ TARGET_RADIUS: float = 0.05
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.1, 0.6, 0.0199)
goal_high = (0.1, 0.7, 0.0201)
hand_low = (-0.5, 0.40, 0.05)
@@ -23,15 +29,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.1, 0.85, 0.02)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.8, 0.02]),
"obj_init_angle": 0.3,
"hand_init_pos": np.array([0, 0.6, 0.2], dtype=np.float32),
@@ -44,15 +47,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_push_back_v2.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
reward,
@@ -65,8 +71,9 @@ def evaluate_state(self, obs, action):
success = float(target_to_obj <= 0.07)
near_object = float(tcp_to_obj <= 0.03)
+ assert self.obj_init_pos is not None
grasp_success = float(
- self.touching_object
+ self.touching_main_object
and (tcp_opened > 0)
and (obj[2] - 0.02 > self.obj_init_pos[2])
)
@@ -81,43 +88,57 @@ def evaluate_state(self, obs, action):
}
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.data.geom("objGeom").xpos
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return Rotation.from_matrix(
self.data.geom("objGeom").xmat.reshape(3, 3)
).as_quat()
- def adjust_initObjPos(self, orig_init_pos):
+ def adjust_initObjPos(self, orig_init_pos: npt.NDArray[Any]) -> npt.NDArray[Any]:
# This is to account for meshes for the geom and object are not aligned
# If this is not done, the object could be initialized in an extreme position
diff = self.get_body_com("obj")[:2] - self.data.geom("objGeom").xpos[:2]
adjustedPos = orig_init_pos[:2] + diff
# The convention we follow is that body_com[2] is always 0, and geom_pos[2] is the object height
- return [adjustedPos[0], adjustedPos[1], self.data.geom("objGeom").xpos[-1]]
+ return np.array(
+ [adjustedPos[0], adjustedPos[1], self.data.geom("objGeom").xpos[-1]]
+ )
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.adjust_initObjPos(self.init_config["obj_init_pos"])
self.obj_init_angle = self.init_config["obj_init_angle"]
+ assert self.obj_init_pos is not None
goal_pos = self._get_state_rand_vec()
- self._target_pos = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
+ self._target_pos = np.concatenate([goal_pos[-3:-1], [self.obj_init_pos[-1]]])
while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
goal_pos = self._get_state_rand_vec()
self._target_pos = np.concatenate(
- (goal_pos[-3:-1], [self.obj_init_pos[-1]])
+ [goal_pos[-3:-1], [self.obj_init_pos[-1]]]
)
- self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
+ self.obj_init_pos = np.concatenate([goal_pos[:2], [self.obj_init_pos[-1]]])
self._set_obj_xyz(self.obj_init_pos)
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def _gripper_caging_reward(self, action, obj_position, obj_radius):
+ def _gripper_caging_reward(
+ self,
+ action: npt.NDArray[np.float32],
+ obj_pos: npt.NDArray[Any],
+ obj_radius: float,
+ pad_success_thresh: float = 0, # All of these args are unused
+ object_reach_radius: float = 0, # just here to match the parent's type signature
+ xz_thresh: float = 0,
+ desired_gripper_effort: float = 1.0,
+ high_density: bool = False,
+ medium_density: bool = False,
+ ) -> float:
pad_success_margin = 0.05
grip_success_margin = obj_radius + 0.003
x_z_success_margin = 0.01
@@ -125,13 +146,13 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
tcp = self.tcp_center
left_pad = self.get_body_com("leftpad")
right_pad = self.get_body_com("rightpad")
- delta_object_y_left_pad = left_pad[1] - obj_position[1]
- delta_object_y_right_pad = obj_position[1] - right_pad[1]
+ delta_object_y_left_pad = left_pad[1] - obj_pos[1]
+ delta_object_y_right_pad = obj_pos[1] - right_pad[1]
right_caging_margin = abs(
- abs(obj_position[1] - self.init_right_pad[1]) - pad_success_margin
+ abs(obj_pos[1] - self.init_right_pad[1]) - pad_success_margin
)
left_caging_margin = abs(
- abs(obj_position[1] - self.init_left_pad[1]) - pad_success_margin
+ abs(obj_pos[1] - self.init_left_pad[1]) - pad_success_margin
)
right_caging = reward_utils.tolerance(
@@ -169,10 +190,9 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
assert y_caging >= 0 and y_caging <= 1
tcp_xz = tcp + np.array([0.0, -tcp[1], 0.0])
- obj_position_x_z = np.copy(obj_position) + np.array(
- [0.0, -obj_position[1], 0.0]
- )
+ obj_position_x_z = np.copy(obj_pos) + np.array([0.0, -obj_pos[1], 0.0])
tcp_obj_norm_x_z = np.linalg.norm(tcp_xz - obj_position_x_z, ord=2)
+ assert self.obj_init_pos is not None
init_obj_x_z = self.obj_init_pos + np.array([0.0, -self.obj_init_pos[1], 0.0])
init_tcp_x_z = self.init_tcp + np.array([0.0, -self.init_tcp[1], 0.0])
@@ -180,7 +200,7 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
np.linalg.norm(init_obj_x_z - init_tcp_x_z, ord=2) - x_z_success_margin
)
x_z_caging = reward_utils.tolerance(
- tcp_obj_norm_x_z,
+ float(tcp_obj_norm_x_z),
bounds=(0, x_z_success_margin),
margin=tcp_obj_x_z_margin,
sigmoid="long_tail",
@@ -203,12 +223,15 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
return caging_and_gripping
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
obj = obs[4:7]
tcp_opened = obs[3]
- tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
- target_to_obj = np.linalg.norm(obj - self._target_pos)
- target_to_obj_init = np.linalg.norm(self.obj_init_pos - self._target_pos)
+ tcp_to_obj = float(np.linalg.norm(obj - self.tcp_center))
+ target_to_obj = float(np.linalg.norm(obj - self._target_pos))
+ target_to_obj_init = float(np.linalg.norm(self.obj_init_pos - self._target_pos))
in_place = reward_utils.tolerance(
target_to_obj,
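The `_gripper_caging_reward` override above grows several parameters it never reads; as its own comments say, they exist only to match the parent's signature. That is the standard way to keep an override type-safe: mypy enforces that a subclass method accepts at least everything the base method accepts. A schematic version with hypothetical names:

```python
from __future__ import annotations


class Base:
    def caging_reward(
        self,
        obj_radius: float,
        pad_success_thresh: float,
        xz_thresh: float,
        high_density: bool = False,
    ) -> float:
        return 0.0


class Child(Base):
    def caging_reward(
        self,
        obj_radius: float,
        pad_success_thresh: float = 0.0,  # unused: kept so the override
        xz_thresh: float = 0.0,           # matches the parent's signature
        high_density: bool = False,
    ) -> float:
        # This env only needs the radius; callers holding a Base reference
        # and passing the full argument list still work.
        return obj_radius * 2.0


print(Child().caging_reward(0.02))
```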
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_v2.py
index 0e08b1243..29ce40595 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_v2.py
@@ -1,13 +1,16 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPushEnvV2(SawyerXYZEnv):
@@ -25,9 +28,12 @@ class SawyerPushEnvV2(SawyerXYZEnv):
- (6/15/20) Separated reach-push-pick-place into 3 separate envs.
"""
- TARGET_RADIUS = 0.05
+ TARGET_RADIUS: float = 0.05
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.6, 0.02)
@@ -36,15 +42,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.1, 0.9, 0.02)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0.0, 0.6, 0.02]),
"hand_init_pos": np.array([0.0, 0.6, 0.2]),
@@ -56,24 +59,22 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.obj_init_pos = self.init_config["obj_init_pos"]
self.hand_init_pos = self.init_config["hand_init_pos"]
- self.action_space = Box(
- np.array([-1, -1, -1, -1]),
- np.array([+1, +1, +1, +1]),
- )
-
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.num_resets = 0
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_push_v2.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
@@ -85,6 +86,7 @@ def evaluate_state(self, obs, action):
in_place,
) = self.compute_reward(action, obs)
+ assert self.obj_init_pos is not None
info = {
"success": float(target_to_obj <= self.TARGET_RADIUS),
"near_object": float(tcp_to_obj <= 0.03),
@@ -101,14 +103,14 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("objGeom").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def fix_extreme_obj_pos(self, orig_init_pos):
+ def fix_extreme_obj_pos(self, orig_init_pos: npt.NDArray[Any]) -> npt.NDArray[Any]:
# This is to account for meshes for the geom and object are not
# aligned. If this is not done, the object could be initialized in an
# extreme position
@@ -116,9 +118,11 @@ def fix_extreme_obj_pos(self, orig_init_pos):
adjusted_pos = orig_init_pos[:2] + diff
# The convention we follow is that body_com[2] is always 0,
# and geom_pos[2] is the object height
- return [adjusted_pos[0], adjusted_pos[1], self.get_body_com("obj")[-1]]
+ return np.array(
+ [adjusted_pos[0], adjusted_pos[1], self.get_body_com("obj")[-1]]
+ )
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = np.array(
@@ -131,19 +135,22 @@ def reset_model(self):
while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[3:]
- self._target_pos = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
- self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
+ self._target_pos = np.concatenate([goal_pos[-3:-1], [self.obj_init_pos[-1]]])
+ self.obj_init_pos = np.concatenate([goal_pos[:2], [self.obj_init_pos[-1]]])
self._set_obj_xyz(self.obj_init_pos)
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
obj = obs[4:7]
tcp_opened = obs[3]
- tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
- target_to_obj = np.linalg.norm(obj - self._target_pos)
- target_to_obj_init = np.linalg.norm(self.obj_init_pos - self._target_pos)
+ tcp_to_obj = float(np.linalg.norm(obj - self.tcp_center))
+ target_to_obj = float(np.linalg.norm(obj - self._target_pos))
+ target_to_obj_init = float(np.linalg.norm(self.obj_init_pos - self._target_pos))
in_place = reward_utils.tolerance(
target_to_obj,
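One deletion specific to this file: the hand-built 4-DoF `action_space` is gone from `SawyerPushEnvV2.__init__`. Presumably the space is now defined once on the shared `SawyerXYZEnv` base (that change falls outside this section), which removes a per-env copy that could drift out of sync. For reference, the deleted space was equivalent to the sketch below, where the axis meaning (x, y, z deltas plus gripper effort) is the usual Sawyer convention rather than something stated in this hunk:

```python
import numpy as np
from gymnasium.spaces import Box

# 4 action dimensions, each clipped to [-1, 1]
action_space = Box(np.full(4, -1.0), np.full(4, 1.0), dtype=np.float32)
print(action_space)
```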
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_wall_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_wall_v2.py
index 99b26856e..430986b02 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_wall_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_push_wall_v2.py
@@ -1,15 +1,18 @@
"""Version 2 of SawyerPushWallEnv."""
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerPushWallEnvV2(SawyerXYZEnv):
@@ -28,9 +31,12 @@ class SawyerPushWallEnvV2(SawyerXYZEnv):
- (6/15/20) Separated reach-push-pick-place into 3 separate envs.
"""
- OBJ_RADIUS = 0.02
+ OBJ_RADIUS: float = 0.02
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.05, 0.6, 0.015)
@@ -39,15 +45,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.05, 0.9, 0.02)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0, 0.6, 0.02]),
"hand_init_pos": np.array([0, 0.6, 0.2]),
@@ -62,17 +65,20 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.num_resets = 0
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_push_wall_v2.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
reward,
@@ -85,6 +91,7 @@ def evaluate_state(self, obs, action):
success = float(obj_to_target <= 0.07)
near_object = float(tcp_to_obj <= 0.03)
+ assert self.obj_init_pos is not None
grasp_success = float(
self.touching_main_object
and (tcp_open > 0)
@@ -101,19 +108,21 @@ def evaluate_state(self, obs, action):
}
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.data.geom("objGeom").xpos
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("objGeom").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def adjust_initObjPos(self, orig_init_pos):
+ def adjust_initObjPos(self, orig_init_pos: npt.NDArray[Any]) -> npt.NDArray[Any]:
diff = self.get_body_com("obj")[:2] - self.data.geom("objGeom").xpos[:2]
adjustedPos = orig_init_pos[:2] + diff
- return [adjustedPos[0], adjustedPos[1], self.data.geom("objGeom").xpos[-1]]
+ return np.array(
+ [adjustedPos[0], adjustedPos[1], self.data.geom("objGeom").xpos[-1]]
+ )
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.adjust_initObjPos(self.init_config["obj_init_pos"])
@@ -124,30 +133,34 @@ def reset_model(self):
while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[3:]
- self._target_pos = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
- self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
+ self._target_pos = np.concatenate([goal_pos[-3:-1], [self.obj_init_pos[-1]]])
+ self.obj_init_pos = np.concatenate([goal_pos[:2], [self.obj_init_pos[-1]]])
self._set_obj_xyz(self.obj_init_pos)
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, action, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
- tcp_opened = obs[3]
+ tcp_opened: float = obs[3]
midpoint = np.array([-0.05, 0.77, obj[2]])
target = self._target_pos
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
in_place_scaling = np.array([3.0, 1.0, 1.0])
- obj_to_midpoint = np.linalg.norm((obj - midpoint) * in_place_scaling)
- obj_to_midpoint_init = np.linalg.norm(
- (self.obj_init_pos - midpoint) * in_place_scaling
+ obj_to_midpoint = float(np.linalg.norm((obj - midpoint) * in_place_scaling))
+ obj_to_midpoint_init = float(
+ np.linalg.norm((self.obj_init_pos - midpoint) * in_place_scaling)
)
- obj_to_target = np.linalg.norm(obj - target)
- obj_to_target_init = np.linalg.norm(self.obj_init_pos - target)
+ obj_to_target = float(np.linalg.norm(obj - target))
+ obj_to_target_init = float(np.linalg.norm(self.obj_init_pos - target))
in_place_part1 = reward_utils.tolerance(
obj_to_midpoint,
@@ -175,18 +188,18 @@ def compute_reward(self, action, obs):
reward = 2 * object_grasped
if tcp_to_obj < 0.02 and tcp_opened > 0:
- reward = 2 * object_grasped + 1.0 + 4.0 * in_place_part1
+ reward = 2.0 * object_grasped + 1.0 + 4.0 * in_place_part1
if obj[1] > 0.75:
reward = 2 * object_grasped + 1.0 + 4.0 + 3.0 * in_place_part2
if obj_to_target < _TARGET_RADIUS:
reward = 10.0
- return [
+ return (
reward,
tcp_to_obj,
tcp_opened,
- np.linalg.norm(obj - target),
+ float(np.linalg.norm(obj - target)),
object_grasped,
in_place_part2,
- ]
+ )
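The wall variant's reward above is staged: an `in_place_part1` term first pulls the puck toward a `midpoint` beside the wall, then `in_place_part2` takes over toward the target, with fixed bonuses gating each stage and a flat `10.0` on success. A schematic of that gating, using a crude linear stand-in for `reward_utils.tolerance` (the real helper offers several sigmoid shapes):

```python
import numpy as np


def toy_tolerance(dist: float, radius: float = 0.05, margin: float = 0.4) -> float:
    # Stand-in for reward_utils.tolerance: 1 inside the success radius,
    # decaying linearly to 0 over `margin` outside it.
    if dist <= radius:
        return 1.0
    return max(0.0, 1.0 - (dist - radius) / margin)


def staged_reward(
    obj: np.ndarray,
    midpoint: np.ndarray,
    target: np.ndarray,
    object_grasped: float,
    tcp_to_obj: float,
    tcp_opened: float,
) -> float:
    in_place_part1 = toy_tolerance(float(np.linalg.norm(obj - midpoint)))
    in_place_part2 = toy_tolerance(float(np.linalg.norm(obj - target)))
    reward = 2.0 * object_grasped                      # stage 0: cage the puck
    if tcp_to_obj < 0.02 and tcp_opened > 0:
        reward = 2.0 * object_grasped + 1.0 + 4.0 * in_place_part1
    if obj[1] > 0.75:                                  # puck is past the wall
        reward = 2.0 * object_grasped + 1.0 + 4.0 + 3.0 * in_place_part2
    if np.linalg.norm(obj - target) < 0.05:            # success: flat maximum
        reward = 10.0
    return reward


print(staged_reward(np.array([0.05, 0.9, 0.02]), np.array([-0.05, 0.77, 0.02]),
                    np.array([0.05, 0.9, 0.02]), 1.0, 0.01, 1.0))  # 10.0
```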
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_reach_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_reach_v2.py
index 3882a77c8..12a5a85b4 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_reach_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_reach_v2.py
@@ -1,14 +1,16 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerReachEnvV2(SawyerXYZEnv):
@@ -26,7 +28,10 @@ class SawyerReachEnvV2(SawyerXYZEnv):
- (6/15/20) Separated reach-push-pick-place into 3 separate envs.
"""
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.1, 0.8, 0.05)
goal_high = (0.1, 0.9, 0.3)
hand_low = (-0.5, 0.40, 0.05)
@@ -35,15 +40,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.1, 0.7, 0.02)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0.0, 0.6, 0.02]),
"hand_init_pos": np.array([0.0, 0.6, 0.2]),
@@ -58,15 +60,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_reach_v2.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
reward, reach_dist, in_place = self.compute_reward(action, obs)
success = float(reach_dist <= 0.05)
@@ -82,14 +87,14 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("objGeom").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def fix_extreme_obj_pos(self, orig_init_pos):
+ def fix_extreme_obj_pos(self, orig_init_pos: npt.NDArray[Any]) -> npt.NDArray[Any]:
# This is to account for meshes for the geom and object are not
# aligned. If this is not done, the object could be initialized in an
# extreme position
@@ -97,9 +102,11 @@ def fix_extreme_obj_pos(self, orig_init_pos):
adjusted_pos = orig_init_pos[:2] + diff
# The convention we follow is that body_com[2] is always 0,
# and geom_pos[2] is the object height
- return [adjusted_pos[0], adjusted_pos[1], self.get_body_com("obj")[-1]]
+ return np.array(
+ [adjusted_pos[0], adjusted_pos[1], self.get_body_com("obj")[-1]]
+ )
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.fix_extreme_obj_pos(self.init_config["obj_init_pos"])
@@ -113,20 +120,25 @@ def reset_model(self):
self._target_pos = goal_pos[-3:]
self.obj_init_pos = goal_pos[:3]
self._set_obj_xyz(self.obj_init_pos)
- mujoco.mj_forward(self.model, self.data)
+
+ self.model.site("goal").pos = self._target_pos
+
return self._get_obs()
- def compute_reward(self, actions, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float]:
+ assert self._target_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
# obj = obs[4:7]
# tcp_opened = obs[3]
target = self._target_pos
- tcp_to_target = np.linalg.norm(tcp - target)
- # obj_to_target = np.linalg.norm(obj - target)
+ tcp_to_target = float(np.linalg.norm(tcp - target))
+ # obj_to_target = float(np.linalg.norm(obj - target))
- in_place_margin = np.linalg.norm(self.hand_init_pos - target)
+ in_place_margin = float(np.linalg.norm(self.hand_init_pos - target))
in_place = reward_utils.tolerance(
tcp_to_target,
bounds=(0, _TARGET_RADIUS),
@@ -134,4 +146,4 @@ def compute_reward(self, actions, obs):
sigmoid="long_tail",
)
- return [10 * in_place, tcp_to_target, in_place]
+ return (10 * in_place, tcp_to_target, in_place)
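The reach reward above is the simplest in the family: `10 * tolerance(tcp_to_target)`, dense and bounded in [0, 10], maximal only inside the 5 cm radius. Metaworld's `reward_utils.tolerance` appears to be adapted from dm_control's `rewards.tolerance`; assuming that lineage, the `long_tail` sigmoid used here is `1 / ((x * scale)^2 + 1)`, scaled so the value at the margin is 0.1. A sketch of the decay outside the success radius, with distance pre-normalized by the margin:

```python
import numpy as np


def long_tail(x: np.ndarray, value_at_margin: float = 0.1) -> np.ndarray:
    # dm_control-style long_tail sigmoid (assumed, not taken from this diff):
    # 1 at x = 0, `value_at_margin` at x = 1, slow ~1/x^2 decay beyond.
    scale = np.sqrt(1.0 / value_at_margin - 1.0)
    return 1.0 / ((x * scale) ** 2 + 1.0)


# margin here is ||hand_init_pos - target||; reward = 10 * value
for d in (0.0, 0.5, 1.0, 2.0):
    print(d, round(10 * float(long_tail(np.array(d))), 2))
# 0.0 -> 10.0, 0.5 -> 3.08, 1.0 -> 1.0, 2.0 -> 0.27
```

Note also that `mujoco.mj_forward` disappears from `reset_model`; syncing the `goal` site position, presumably so the target renders in the right place, is all that remains.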
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_reach_wall_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_reach_wall_v2.py
index d4638b21b..8a2780cd9 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_reach_wall_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_reach_wall_v2.py
@@ -1,13 +1,16 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerReachWallEnvV2(SawyerXYZEnv):
@@ -25,7 +28,10 @@ class SawyerReachWallEnvV2(SawyerXYZEnv):
i.e. (self._target_pos - pos_hand)
"""
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.05, 0.85, 0.05)
goal_high = (0.05, 0.9, 0.3)
hand_low = (-0.5, 0.40, 0.05)
@@ -34,15 +40,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.05, 0.65, 0.015)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0, 0.6, 0.02]),
"hand_init_pos": np.array([0, 0.6, 0.2]),
@@ -57,17 +60,20 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.num_resets = 0
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_reach_wall_v2.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
reward, tcp_to_object, in_place = self.compute_reward(action, obs)
success = float(tcp_to_object <= 0.05)
@@ -83,14 +89,14 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("objGeom").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_angle = self.init_config["obj_init_angle"]
@@ -104,20 +110,23 @@ def reset_model(self):
self.obj_init_pos = goal_pos[:3]
self._set_obj_xyz(self.obj_init_pos)
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def compute_reward(self, actions, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
# obj = obs[4:7]
# tcp_opened = obs[3]
target = self._target_pos
- tcp_to_target = np.linalg.norm(tcp - target)
- # obj_to_target = np.linalg.norm(obj - target)
+ tcp_to_target = float(np.linalg.norm(tcp - target))
+ # obj_to_target = float(np.linalg.norm(obj - target))
- in_place_margin = np.linalg.norm(self.hand_init_pos - target)
+ in_place_margin = float(np.linalg.norm(self.hand_init_pos - target))
in_place = reward_utils.tolerance(
tcp_to_target,
bounds=(0, _TARGET_RADIUS),
@@ -125,4 +134,4 @@ def compute_reward(self, actions, obs):
sigmoid="long_tail",
)
- return [10 * in_place, tcp_to_target, in_place]
+ return (10 * in_place, tcp_to_target, in_place)
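The asserts added at the top of these reward functions do double duty: they fail fast if `reset_model()` was skipped, and they narrow the `Optional` attribute types for mypy. (Here the assert also covers `self.obj_init_pos`, which this particular reward never reads; harmless, but slightly broader than needed.) A self-contained sketch of the narrowing pattern:

```python
from __future__ import annotations

import numpy as np
import numpy.typing as npt


class EnvSketch:
    # None until reset_model() runs, hence Optional in the type system.
    _target_pos: npt.NDArray[np.float64] | None = None

    def reset_model(self) -> None:
        self._target_pos = np.array([0.05, 0.9, 0.2])

    def compute_reward(self) -> float:
        # Narrows _target_pos from Optional for mypy; raises if reset skipped.
        assert self._target_pos is not None, "`reset_model()` must run first."
        return float(np.linalg.norm(self._target_pos))


env = EnvSketch()
env.reset_model()
print(env.compute_reward())
```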
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_shelf_place_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_shelf_place_v2.py
index 19c8ae681..f565fe1ee 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_shelf_place_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_shelf_place_v2.py
@@ -1,18 +1,24 @@
+from __future__ import annotations
+
+from typing import Any
+
import mujoco
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerShelfPlaceEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.1, 0.8, 0.299)
goal_high = (0.1, 0.9, 0.301)
hand_low = (-0.5, 0.40, 0.05)
@@ -21,15 +27,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.1, 0.6, 0.021)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.6, 0.02]),
"obj_init_angle": 0.3,
"hand_init_pos": np.array([0, 0.6, 0.2], dtype=np.float32),
@@ -44,15 +47,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_shelf_placing.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
reward,
@@ -64,8 +70,9 @@ def evaluate_state(self, obs, action):
) = self.compute_reward(action, obs)
success = float(obj_to_target <= 0.07)
near_object = float(tcp_to_obj <= 0.03)
+ assert self.obj_init_pos is not None
grasp_success = float(
- self.touching_object
+ self.touching_main_object
and (tcp_open > 0)
and (obj[2] - 0.02 > self.obj_init_pos[2])
)
@@ -82,23 +89,23 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("objGeom").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def adjust_initObjPos(self, orig_init_pos):
+ def adjust_initObjPos(self, orig_init_pos: npt.NDArray[Any]) -> npt.NDArray[Any]:
# This is to account for meshes for the geom and object are not aligned
# If this is not done, the object could be initialized in an extreme position
diff = self.get_body_com("obj")[:2] - self.data.geom("objGeom").xpos[:2]
adjustedPos = orig_init_pos[:2] + diff
# The convention we follow is that body_com[2] is always 0, and geom_pos[2] is the object height
- return [adjustedPos[0], adjustedPos[1], self.get_body_com("obj")[-1]]
+ return np.array([adjustedPos[0], adjustedPos[1], self.get_body_com("obj")[-1]])
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = self.adjust_initObjPos(self.init_config["obj_init_pos"])
self.obj_init_angle = self.init_config["obj_init_angle"]
@@ -111,32 +118,28 @@ def reset_model(self):
(base_shelf_pos[:2], [self.obj_init_pos[-1]])
)
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "shelf")
- ] = base_shelf_pos[-3:]
+ self.model.body("shelf").pos = base_shelf_pos[-3:]
mujoco.mj_forward(self.model, self.data)
- self._target_pos = (
- self.model.site_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_SITE, "goal")
- ]
- + self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "shelf")
- ]
- )
+ self._target_pos = self.model.site("goal").pos + self.model.body("shelf").pos
+ assert self.obj_init_pos is not None
self._set_obj_xyz(self.obj_init_pos)
-
+ assert self._target_pos is not None
+ self._set_pos_site("goal", self._target_pos)
return self._get_obs()
- def compute_reward(self, action, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
tcp_opened = obs[3]
target = self._target_pos
- obj_to_target = np.linalg.norm(obj - target)
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ obj_to_target = float(np.linalg.norm(obj - target))
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
in_place_margin = np.linalg.norm(self.obj_init_pos - target)
in_place = reward_utils.tolerance(
@@ -185,4 +188,4 @@ def compute_reward(self, action, obs):
if obj_to_target < _TARGET_RADIUS:
reward = 10.0
- return [reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place]
+ return (reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place)
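The shelf-place reset is where the new named accessors pay off most visibly: three `mujoco.mj_name2id(...)` index expressions collapse into `self.model.body("shelf").pos` and `self.model.site("goal").pos`. Both spellings address the same underlying arrays. A standalone comparison on a toy model (illustrative XML; assumes the named-access API of the official `mujoco` bindings):

```python
import mujoco
import numpy as np

XML = """
<mujoco>
  <worldbody>
    <body name="shelf" pos="0 0.9 0">
      <geom type="box" size="0.1 0.1 0.1"/>
    </body>
    <site name="goal" pos="0 0.8 0.3"/>
  </worldbody>
</mujoco>
"""
model = mujoco.MjModel.from_xml_string(XML)

# Old style: resolve an integer id, then index the flat array.
shelf_id = mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_BODY, "shelf")
model.body_pos[shelf_id] = np.array([0.1, 0.9, 0.0])

# New style: the named view reads and writes the same storage.
model.body("shelf").pos = np.array([0.1, 0.9, 0.0])
assert np.allclose(model.body_pos[shelf_id], model.body("shelf").pos)
```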
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_soccer_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_soccer_v2.py
index 51ec9babb..9132ac2d2 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_soccer_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_soccer_v2.py
@@ -1,21 +1,26 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerSoccerEnvV2(SawyerXYZEnv):
- OBJ_RADIUS = 0.013
- TARGET_RADIUS = 0.07
+ OBJ_RADIUS: float = 0.013
+ TARGET_RADIUS: float = 0.07
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
goal_low = (-0.1, 0.8, 0.0)
goal_high = (0.1, 0.9, 0.0)
hand_low = (-0.5, 0.40, 0.05)
@@ -24,15 +29,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.1, 0.7, 0.03)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0, 0.6, 0.03]),
"obj_init_angle": 0.3,
"hand_init_pos": np.array([0.0, 0.6, 0.2]),
@@ -45,15 +47,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_soccer.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
obj = obs[4:7]
(
reward,
@@ -66,8 +71,9 @@ def evaluate_state(self, obs, action):
success = float(target_to_obj <= 0.07)
near_object = float(tcp_to_obj <= 0.03)
+ assert self.obj_init_pos is not None
grasp_success = float(
- self.touching_object
+ self.touching_main_object
and (tcp_opened > 0)
and (obj[2] - 0.02 > self.obj_init_pos[2])
)
@@ -83,14 +89,14 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("soccer_ball")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.body("soccer_ball").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_angle = self.init_config["obj_init_angle"]
@@ -100,18 +106,30 @@ def reset_model(self):
while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
goal_pos = self._get_state_rand_vec()
self._target_pos = goal_pos[3:]
- self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "goal_whole")
- ] = self._target_pos
+ assert self.obj_init_pos is not None
+ self.obj_init_pos = np.concatenate([goal_pos[:2], [self.obj_init_pos[-1]]])
+ self.model.body("goal_whole").pos = self._target_pos
self._set_obj_xyz(self.obj_init_pos)
self.maxPushDist = np.linalg.norm(
self.obj_init_pos[:2] - np.array(self._target_pos)[:2]
)
+ self.model.site("goal").pos = self._target_pos
+
return self._get_obs()
- def _gripper_caging_reward(self, action, obj_position, obj_radius):
+ def _gripper_caging_reward(
+ self,
+ action: npt.NDArray[np.float32],
+ obj_pos: npt.NDArray[Any],
+ obj_radius: float,
+ pad_success_thresh: float = 0, # None of these args are used,
+ object_reach_radius: float = 0, # just here to match the parent's
+ xz_thresh: float = 0, # type signature
+ desired_gripper_effort: float = 1.0,
+ high_density: bool = False,
+ medium_density: bool = False,
+ ) -> float:
pad_success_margin = 0.05
grip_success_margin = obj_radius + 0.01
x_z_success_margin = 0.005
@@ -119,13 +137,13 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
tcp = self.tcp_center
left_pad = self.get_body_com("leftpad")
right_pad = self.get_body_com("rightpad")
- delta_object_y_left_pad = left_pad[1] - obj_position[1]
- delta_object_y_right_pad = obj_position[1] - right_pad[1]
+ delta_object_y_left_pad = left_pad[1] - obj_pos[1]
+ delta_object_y_right_pad = obj_pos[1] - right_pad[1]
right_caging_margin = abs(
- abs(obj_position[1] - self.init_right_pad[1]) - pad_success_margin
+ abs(obj_pos[1] - self.init_right_pad[1]) - pad_success_margin
)
left_caging_margin = abs(
- abs(obj_position[1] - self.init_left_pad[1]) - pad_success_margin
+ abs(obj_pos[1] - self.init_left_pad[1]) - pad_success_margin
)
right_caging = reward_utils.tolerance(
@@ -163,10 +181,9 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
assert y_caging >= 0 and y_caging <= 1
tcp_xz = tcp + np.array([0.0, -tcp[1], 0.0])
- obj_position_x_z = np.copy(obj_position) + np.array(
- [0.0, -obj_position[1], 0.0]
- )
+ obj_position_x_z = np.copy(obj_pos) + np.array([0.0, -obj_pos[1], 0.0])
tcp_obj_norm_x_z = np.linalg.norm(tcp_xz - obj_position_x_z, ord=2)
+ assert self.obj_init_pos is not None
init_obj_x_z = self.obj_init_pos + np.array([0.0, -self.obj_init_pos[1], 0.0])
init_tcp_x_z = self.init_tcp + np.array([0.0, -self.init_tcp[1], 0.0])
@@ -174,7 +191,7 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
np.linalg.norm(init_obj_x_z - init_tcp_x_z, ord=2) - x_z_success_margin
)
x_z_caging = reward_utils.tolerance(
- tcp_obj_norm_x_z,
+ float(tcp_obj_norm_x_z),
bounds=(0, x_z_success_margin),
margin=tcp_obj_x_z_margin,
sigmoid="long_tail",
@@ -197,13 +214,18 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
return caging_and_gripping
- def compute_reward(self, action, obs):
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
obj = obs[4:7]
- tcp_opened = obs[3]
+ tcp_opened: float = obs[3]
x_scaling = np.array([3.0, 1.0, 1.0])
- tcp_to_obj = np.linalg.norm(obj - self.tcp_center)
- target_to_obj = np.linalg.norm((obj - self._target_pos) * x_scaling)
- target_to_obj_init = np.linalg.norm((obj - self.obj_init_pos) * x_scaling)
+ tcp_to_obj = float(np.linalg.norm(obj - self.tcp_center))
+ target_to_obj = float(np.linalg.norm((obj - self._target_pos) * x_scaling))
+ target_to_obj_init = float(
+ np.linalg.norm((obj - self.obj_init_pos) * x_scaling)
+ )
in_place = reward_utils.tolerance(
target_to_obj,
@@ -228,7 +250,7 @@ def compute_reward(self, action, obs):
reward,
tcp_to_obj,
tcp_opened,
- np.linalg.norm(obj - self._target_pos),
+ float(np.linalg.norm(obj - self._target_pos)),
object_grasped,
in_place,
)
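A soccer-specific detail above: the distances feeding `in_place` scale the error by `x_scaling = [3, 1, 1]` before taking the norm, so a lateral (x) miss costs three times as much as the same miss in y or z, which suits a goal mouth that is narrow along x. Numerically:

```python
import numpy as np

x_scaling = np.array([3.0, 1.0, 1.0])

lateral_miss = np.array([0.10, 0.0, 0.0])  # 10 cm off along x
depth_miss = np.array([0.0, 0.10, 0.0])    # 10 cm off along y

print(float(np.linalg.norm(lateral_miss * x_scaling)))  # 0.3
print(float(np.linalg.norm(depth_miss * x_scaling)))    # 0.1
```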
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_stick_pull_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_stick_pull_v2.py
index 3b899a072..1d73122be 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_stick_pull_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_stick_pull_v2.py
@@ -1,17 +1,23 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import ObservationDict, StickInitConfigDict
class SawyerStickPullEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.35, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.55, 0.000)
@@ -20,15 +26,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.45, 0.55, 0.0201)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: StickInitConfigDict = {
"stick_init_pos": np.array([0, 0.6, 0.02]),
"hand_init_pos": np.array([0, 0.6, 0.2]),
}
@@ -39,19 +42,22 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
# Fix object init position.
self.obj_init_pos = np.array([0.2, 0.69, 0.0])
self.obj_init_qpos = np.array([0.0, 0.09])
- self.obj_space = Box(np.array(obj_low), np.array(obj_high))
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.obj_space = Box(np.array(obj_low), np.array(obj_high), dtype=np.float64)
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_stick_obj.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
stick = obs[4:7]
handle = obs[11:14]
end_of_stick = self._get_site_pos("stick_end")
@@ -64,13 +70,14 @@ def evaluate_state(self, obs, action):
stick_in_place,
) = self.compute_reward(action, obs)
+ assert self._target_pos is not None and self.obj_init_pos is not None
success = float(
(np.linalg.norm(handle - self._target_pos) <= 0.12)
and self._stick_is_inserted(handle, end_of_stick)
)
near_object = float(tcp_to_obj <= 0.03)
grasp_success = float(
- self.touching_object
+ self.touching_main_object
and (tcp_open > 0)
and (stick[2] - 0.02 > self.obj_init_pos[2])
)
@@ -87,7 +94,7 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return np.hstack(
(
self.get_body_com("stick").copy(),
@@ -95,7 +102,7 @@ def _get_pos_objects(self):
)
)
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.body("stick").xmat.reshape(3, 3)
return np.hstack(
(
@@ -111,26 +118,26 @@ def _get_quat_objects(self):
)
)
- def _get_obs_dict(self):
+ def _get_obs_dict(self) -> ObservationDict:
obs_dict = super()._get_obs_dict()
obs_dict["state_achieved_goal"] = self._get_site_pos("insertion")
return obs_dict
- def _set_stick_xyz(self, pos):
+ def _set_stick_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[16:18] = pos.copy()
qvel[16:18] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.obj_init_pos = np.array([0.2, 0.69, 0.04])
self.obj_init_qpos = np.array([0.0, 0.09])
@@ -140,39 +147,46 @@ def reset_model(self):
goal_pos = self._get_state_rand_vec()
while np.linalg.norm(goal_pos[:2] - goal_pos[-3:-1]) < 0.1:
goal_pos = self._get_state_rand_vec()
- self.stick_init_pos = np.concatenate((goal_pos[:2], [self.stick_init_pos[-1]]))
- self._target_pos = np.concatenate((goal_pos[-3:-1], [self.stick_init_pos[-1]]))
+ self.stick_init_pos = np.concatenate([goal_pos[:2], [self.stick_init_pos[-1]]])
+ self._target_pos = np.concatenate([goal_pos[-3:-1], [self.stick_init_pos[-1]]])
self._set_stick_xyz(self.stick_init_pos)
self._set_obj_xyz(self.obj_init_qpos)
self.obj_init_pos = self.get_body_com("object").copy()
+ self.model.site("goal").pos = self._target_pos
+
return self._get_obs()
- def _stick_is_inserted(self, handle, end_of_stick):
+ def _stick_is_inserted(
+ self, handle: npt.NDArray[Any], end_of_stick: npt.NDArray[Any]
+ ) -> bool:
return (
(end_of_stick[0] >= handle[0])
and (np.abs(end_of_stick[1] - handle[1]) <= 0.040)
and (np.abs(end_of_stick[2] - handle[2]) <= 0.060)
)
- def compute_reward(self, action, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
stick = obs[4:7]
end_of_stick = self._get_site_pos("stick_end")
container = obs[11:14] + np.array([0.05, 0.0, 0.0])
container_init_pos = self.obj_init_pos + np.array([0.05, 0.0, 0.0])
handle = obs[11:14]
- tcp_opened = obs[3]
+ tcp_opened: float = obs[3]
target = self._target_pos
- tcp_to_stick = np.linalg.norm(stick - tcp)
- handle_to_target = np.linalg.norm(handle - target)
+ tcp_to_stick = float(np.linalg.norm(stick - tcp))
+ handle_to_target = float(np.linalg.norm(handle - target))
yz_scaling = np.array([1.0, 1.0, 2.0])
- stick_to_container = np.linalg.norm((stick - container) * yz_scaling)
- stick_in_place_margin = np.linalg.norm(
- (self.stick_init_pos - container_init_pos) * yz_scaling
+ stick_to_container = float(np.linalg.norm((stick - container) * yz_scaling))
+ stick_in_place_margin = float(
+ np.linalg.norm((self.stick_init_pos - container_init_pos) * yz_scaling)
)
stick_in_place = reward_utils.tolerance(
stick_to_container,
@@ -181,8 +195,8 @@ def compute_reward(self, action, obs):
sigmoid="long_tail",
)
- stick_to_target = np.linalg.norm(stick - target)
- stick_in_place_margin_2 = np.linalg.norm(self.stick_init_pos - target)
+ stick_to_target = float(np.linalg.norm(stick - target))
+ stick_in_place_margin_2 = float(np.linalg.norm(self.stick_init_pos - target))
stick_in_place_2 = reward_utils.tolerance(
stick_to_target,
bounds=(0, _TARGET_RADIUS),
@@ -190,8 +204,8 @@ def compute_reward(self, action, obs):
sigmoid="long_tail",
)
- container_to_target = np.linalg.norm(container - target)
- container_in_place_margin = np.linalg.norm(self.obj_init_pos - target)
+ container_to_target = float(np.linalg.norm(container - target))
+ container_in_place_margin = float(np.linalg.norm(self.obj_init_pos - target))
container_in_place = reward_utils.tolerance(
container_to_target,
bounds=(0, _TARGET_RADIUS),
@@ -236,11 +250,11 @@ def compute_reward(self, action, obs):
if handle_to_target <= 0.12:
reward = 10.0
- return [
+ return (
reward,
tcp_to_stick,
tcp_opened,
handle_to_target,
object_grasped,
stick_in_place,
- ]
+ )
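# Illustrative sketch (not part of this patch): with the new **render_kwargs
# signature, render settings are forwarded to SawyerXYZEnv as keyword
# arguments instead of dedicated parameters. Argument values are examples.
from metaworld.envs.mujoco.sawyer_xyz.v2.sawyer_stick_pull_v2 import (
    SawyerStickPullEnvV2,
)

env = SawyerStickPullEnvV2(render_mode="rgb_array", camera_name="corner")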
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_stick_push_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_stick_push_v2.py
index 47d39b044..d5ac20de6 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_stick_push_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_stick_push_v2.py
@@ -1,17 +1,23 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import ObservationDict, StickInitConfigDict
class SawyerStickPushEnvV2(SawyerXYZEnv):
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.08, 0.58, 0.000)
@@ -20,15 +26,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.401, 0.6, 0.1321)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: StickInitConfigDict = {
"stick_init_pos": np.array([-0.1, 0.6, 0.02]),
"hand_init_pos": np.array([0, 0.6, 0.2]),
}
@@ -39,19 +42,22 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
# For now, fix the object initial position.
self.obj_init_pos = np.array([0.2, 0.6, 0.0])
self.obj_init_qpos = np.array([0.0, 0.0])
- self.obj_space = Box(np.array(obj_low), np.array(obj_high))
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.obj_space = Box(np.array(obj_low), np.array(obj_high), dtype=np.float64)
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_stick_obj.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
stick = obs[4:7]
container = obs[11:14]
(
@@ -62,10 +68,11 @@ def evaluate_state(self, obs, action):
grasp_reward,
stick_in_place,
) = self.compute_reward(action, obs)
+ assert self._target_pos is not None
success = float(np.linalg.norm(container - self._target_pos) <= 0.12)
near_object = float(tcp_to_obj <= 0.03)
grasp_success = float(
- self.touching_object
+ self.touching_main_object
and (tcp_open > 0)
and (stick[2] - 0.01 > self.stick_init_pos[2])
)
@@ -82,7 +89,7 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return np.hstack(
(
self.get_body_com("stick").copy(),
@@ -90,7 +97,7 @@ def _get_pos_objects(self):
)
)
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.body("stick").xmat.reshape(3, 3)
return np.hstack(
(
@@ -106,28 +113,28 @@ def _get_quat_objects(self):
)
)
- def _get_obs_dict(self):
+ def _get_obs_dict(self) -> ObservationDict:
obs_dict = super()._get_obs_dict()
obs_dict["state_achieved_goal"] = self._get_site_pos("insertion") + np.array(
[0.0, 0.09, 0.0]
)
return obs_dict
- def _set_stick_xyz(self, pos):
+ def _set_stick_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
- def _set_obj_xyz(self, pos):
+ def _set_obj_xyz(self, pos: npt.NDArray[Any]) -> None:
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[16:18] = pos.copy()
qvel[16:18] = 0
self.set_state(qpos, qvel)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.stick_init_pos = self.init_config["stick_init_pos"]
self._target_pos = np.array([0.4, 0.6, self.stick_init_pos[-1]])
@@ -135,29 +142,31 @@ def reset_model(self):
goal_pos = self._get_state_rand_vec()
while np.linalg.norm(goal_pos[:2] - goal_pos[-3:-1]) < 0.1:
goal_pos = self._get_state_rand_vec()
- self.stick_init_pos = np.concatenate((goal_pos[:2], [self.stick_init_pos[-1]]))
+ self.stick_init_pos = np.concatenate([goal_pos[:2], [self.stick_init_pos[-1]]])
self._target_pos = np.concatenate(
- (goal_pos[-3:-1], [self._get_site_pos("insertion")[-1]])
+ [goal_pos[-3:-1], [self._get_site_pos("insertion")[-1]]]
)
self._set_stick_xyz(self.stick_init_pos)
self._set_obj_xyz(self.obj_init_qpos)
self.obj_init_pos = self.get_body_com("object").copy()
+ self.model.site("goal").pos = self._target_pos
+
return self._get_obs()
def _gripper_caging_reward(
self,
- action,
- obj_pos,
- obj_radius,
- pad_success_thresh,
- object_reach_radius,
- xz_thresh,
- desired_gripper_effort=1.0,
- high_density=False,
- medium_density=False,
- ):
+ action: npt.NDArray[np.float32],
+ obj_pos: npt.NDArray[Any],
+ obj_radius: float,
+ pad_success_thresh: float,
+ object_reach_radius: float,
+ xz_thresh: float,
+ desired_gripper_effort: float = 1.0,
+ high_density: bool = False,
+ medium_density: bool = False,
+ ) -> float:
"""Reward for agent grasping obj.
Args:
@@ -208,7 +217,9 @@ def _gripper_caging_reward(
caging_xz_margin = np.linalg.norm(self.stick_init_pos[xz] - self.init_tcp[xz])
caging_xz_margin -= xz_thresh
caging_xz = reward_utils.tolerance(
- np.linalg.norm(tcp[xz] - obj_pos[xz]), # "x" in the description above
+ float(
+ np.linalg.norm(tcp[xz] - obj_pos[xz])
+ ), # "x" in the description above
bounds=(0, xz_thresh),
margin=caging_xz_margin, # "margin" in the description above
sigmoid="long_tail",
@@ -232,7 +243,7 @@ def _gripper_caging_reward(
tcp_to_obj_init = np.linalg.norm(self.stick_init_pos - self.init_tcp)
reach_margin = abs(tcp_to_obj_init - object_reach_radius)
reach = reward_utils.tolerance(
- tcp_to_obj,
+ float(tcp_to_obj),
bounds=(0, object_reach_radius),
margin=reach_margin,
sigmoid="long_tail",
@@ -241,19 +252,22 @@ def _gripper_caging_reward(
return caging_and_gripping
- def compute_reward(self, action, obs):
- _TARGET_RADIUS = 0.12
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
+ _TARGET_RADIUS: float = 0.12
tcp = self.tcp_center
stick = obs[4:7] + np.array([0.015, 0.0, 0.0])
container = obs[11:14]
- tcp_opened = obs[3]
+ tcp_opened: float = obs[3]
target = self._target_pos
- tcp_to_stick = np.linalg.norm(stick - tcp)
- stick_to_target = np.linalg.norm(stick - target)
- stick_in_place_margin = (
- np.linalg.norm(self.stick_init_pos - target)
- ) - _TARGET_RADIUS
+ tcp_to_stick = float(np.linalg.norm(stick - tcp))
+ stick_to_target = float(np.linalg.norm(stick - target))
+ stick_in_place_margin = float(
+ np.linalg.norm(self.stick_init_pos - target) - _TARGET_RADIUS
+ )
stick_in_place = reward_utils.tolerance(
stick_to_target,
bounds=(0, _TARGET_RADIUS),
@@ -261,8 +275,8 @@ def compute_reward(self, action, obs):
sigmoid="long_tail",
)
- container_to_target = np.linalg.norm(container - target)
- container_in_place_margin = (
+ container_to_target = float(np.linalg.norm(container - target))
+ container_in_place_margin = float(
np.linalg.norm(self.obj_init_pos - target) - _TARGET_RADIUS
)
container_in_place = reward_utils.tolerance(
@@ -294,11 +308,11 @@ def compute_reward(self, action, obs):
if container_to_target <= _TARGET_RADIUS:
reward = 10.0
- return [
+ return (
reward,
tcp_to_stick,
tcp_opened,
container_to_target,
object_grasped,
stick_in_place,
- ]
+ )
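# Illustrative sketch (not part of this patch): the list -> tuple change in
# the return statement above matters to mypy because a tuple type fixes both
# arity and per-position element types, while a list type does not.
def rewards_list() -> list[float]:
    return [10.0, 0.1]        # length is not part of the type

def rewards_tuple() -> tuple[float, float]:
    return (10.0, 0.1)        # arity and element types are checked

reward, tcp_to_stick = rewards_tuple()  # unpacking verified statically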
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_sweep_into_goal_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_sweep_into_goal_v2.py
index 10f275c2c..776fc8e8a 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_sweep_into_goal_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_sweep_into_goal_v2.py
@@ -1,19 +1,25 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
from scipy.spatial.transform import Rotation
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerSweepIntoGoalEnvV2(SawyerXYZEnv):
- OBJ_RADIUS = 0.02
+ OBJ_RADIUS: float = 0.02
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.6, 0.02)
@@ -22,15 +28,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (+0.001, 0.8401, 0.0201)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0.0, 0.6, 0.02]),
"obj_init_angle": 0.3,
"hand_init_pos": np.array([0.0, 0.6, 0.2]),
@@ -43,15 +46,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self._random_reset_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
+ dtype=np.float64,
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_table_with_hole.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
# obj = obs[4:7]
(
reward,
@@ -75,14 +81,14 @@ def evaluate_state(self, obs, action):
}
return reward, info
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
geom_xmat = self.data.geom("objGeom").xmat.reshape(3, 3)
return Rotation.from_matrix(geom_xmat).as_quat()
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.get_body_com("obj")
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.get_body_com("obj")
@@ -92,16 +98,25 @@ def reset_model(self):
goal_pos = self._get_state_rand_vec()
while np.linalg.norm(goal_pos[:2] - self._target_pos[:2]) < 0.15:
goal_pos = self._get_state_rand_vec()
- self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
+ assert self.obj_init_pos is not None
+ self.obj_init_pos = np.concatenate([goal_pos[:2], [self.obj_init_pos[-1]]])
self._set_obj_xyz(self.obj_init_pos)
- self.maxPushDist = np.linalg.norm(
- self.obj_init_pos[:2] - np.array(self._target_pos)[:2]
- )
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def _gripper_caging_reward(self, action, obj_position, obj_radius):
+ def _gripper_caging_reward(
+ self,
+ action: npt.NDArray[np.float32],
+ obj_pos: npt.NDArray[Any],
+ obj_radius: float,
+ pad_success_thresh: float = 0, # All of these args are unused,
+ object_reach_radius: float = 0, # just there to match the parent's type signature
+ xz_thresh: float = 0,
+ desired_gripper_effort: float = 1.0,
+ high_density: bool = False,
+ medium_density: bool = False,
+ ) -> float:
pad_success_margin = 0.05
grip_success_margin = obj_radius + 0.005
x_z_success_margin = 0.01
@@ -109,13 +124,13 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
tcp = self.tcp_center
left_pad = self.get_body_com("leftpad")
right_pad = self.get_body_com("rightpad")
- delta_object_y_left_pad = left_pad[1] - obj_position[1]
- delta_object_y_right_pad = obj_position[1] - right_pad[1]
+ delta_object_y_left_pad = left_pad[1] - obj_pos[1]
+ delta_object_y_right_pad = obj_pos[1] - right_pad[1]
right_caging_margin = abs(
- abs(obj_position[1] - self.init_right_pad[1]) - pad_success_margin
+ abs(obj_pos[1] - self.init_right_pad[1]) - pad_success_margin
)
left_caging_margin = abs(
- abs(obj_position[1] - self.init_left_pad[1]) - pad_success_margin
+ abs(obj_pos[1] - self.init_left_pad[1]) - pad_success_margin
)
right_caging = reward_utils.tolerance(
@@ -153,10 +168,9 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
assert y_caging >= 0 and y_caging <= 1
tcp_xz = tcp + np.array([0.0, -tcp[1], 0.0])
- obj_position_x_z = np.copy(obj_position) + np.array(
- [0.0, -obj_position[1], 0.0]
- )
+ obj_position_x_z = np.copy(obj_pos) + np.array([0.0, -obj_pos[1], 0.0])
tcp_obj_norm_x_z = np.linalg.norm(tcp_xz - obj_position_x_z, ord=2)
+ assert self.obj_init_pos is not None
init_obj_x_z = self.obj_init_pos + np.array([0.0, -self.obj_init_pos[1], 0.0])
init_tcp_x_z = self.init_tcp + np.array([0.0, -self.init_tcp[1], 0.0])
@@ -164,7 +178,7 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
np.linalg.norm(init_obj_x_z - init_tcp_x_z, ord=2) - x_z_success_margin
)
x_z_caging = reward_utils.tolerance(
- tcp_obj_norm_x_z,
+ float(tcp_obj_norm_x_z),
bounds=(0, x_z_success_margin),
margin=tcp_obj_x_z_margin,
sigmoid="long_tail",
@@ -187,15 +201,18 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
return caging_and_gripping
- def compute_reward(self, action, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
tcp_opened = obs[3]
target = np.array([self._target_pos[0], self._target_pos[1], obj[2]])
- obj_to_target = np.linalg.norm(obj - target)
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ obj_to_target = float(np.linalg.norm(obj - target))
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
in_place_margin = np.linalg.norm(self.obj_init_pos - target)
in_place = reward_utils.tolerance(
@@ -214,4 +231,4 @@ def compute_reward(self, action, obs):
if obj_to_target < _TARGET_RADIUS:
reward = 10.0
- return [reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place]
+ return (reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place)
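# Illustrative sketch (not part of this patch): the unused, defaulted
# parameters added to _gripper_caging_reward() above keep the override
# signature-compatible with the parent class, which is what mypy checks.
# A minimal toy of the same pattern, with hypothetical class names:
class Base:
    def caging_reward(self, obj_radius: float, xz_thresh: float) -> float:
        return 0.0


class Derived(Base):
    # xz_thresh is ignored here; it exists only so the override still
    # accepts everything the parent's signature accepts.
    def caging_reward(self, obj_radius: float, xz_thresh: float = 0.0) -> float:
        return obj_radius * 2.0


print(Derived().caging_reward(0.02))  # old call site, no xz_thresh passed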
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_sweep_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_sweep_v2.py
index 8d44d1ceb..8d47b47a6 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_sweep_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_sweep_v2.py
@@ -1,18 +1,24 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerSweepEnvV2(SawyerXYZEnv):
- OBJ_RADIUS = 0.02
+ OBJ_RADIUS: float = 0.02
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
init_puck_z = 0.1
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1.0, 0.5)
@@ -22,15 +28,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = (0.51, 0.7, 0.02)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_pos": np.array([0.0, 0.6, 0.02]),
"obj_init_angle": 0.3,
"hand_init_pos": np.array([0.0, 0.6, 0.2]),
@@ -43,17 +46,18 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.init_puck_z = init_puck_z
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_sweep_v2.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -76,31 +80,38 @@ def evaluate_state(self, obs, action):
}
return reward, info
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return self.data.body("obj").xquat
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self.data.body("obj").xpos
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config["obj_init_pos"]
self.objHeight = self._get_pos_objects()[2]
obj_pos = self._get_state_rand_vec()
- self.obj_init_pos = np.concatenate((obj_pos[:2], [self.obj_init_pos[-1]]))
+ self.obj_init_pos = np.concatenate([obj_pos[:2], [self.obj_init_pos[-1]]])
self._target_pos[1] = obj_pos.copy()[1]
self._set_obj_xyz(self.obj_init_pos)
- self.maxPushDist = np.linalg.norm(
- self.get_body_com("obj")[:-1] - self._target_pos[:-1]
- )
- self.target_reward = 1000 * self.maxPushDist + 1000 * 2
-
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def _gripper_caging_reward(self, action, obj_position, obj_radius):
+ def _gripper_caging_reward(
+ self,
+ action: npt.NDArray[np.float32],
+ obj_pos: npt.NDArray[Any],
+ obj_radius: float,
+ pad_success_thresh: float = 0, # All of these args are unused
+ object_reach_radius: float = 0, # just here to match the parent's type signature
+ xz_thresh: float = 0,
+ desired_gripper_effort: float = 1.0,
+ high_density: bool = False,
+ medium_density: bool = False,
+ ) -> float:
pad_success_margin = 0.05
grip_success_margin = obj_radius + 0.01
x_z_success_margin = 0.005
@@ -108,13 +119,13 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
tcp = self.tcp_center
left_pad = self.get_body_com("leftpad")
right_pad = self.get_body_com("rightpad")
- delta_object_y_left_pad = left_pad[1] - obj_position[1]
- delta_object_y_right_pad = obj_position[1] - right_pad[1]
+ delta_object_y_left_pad = left_pad[1] - obj_pos[1]
+ delta_object_y_right_pad = obj_pos[1] - right_pad[1]
right_caging_margin = abs(
- abs(obj_position[1] - self.init_right_pad[1]) - pad_success_margin
+ abs(obj_pos[1] - self.init_right_pad[1]) - pad_success_margin
)
left_caging_margin = abs(
- abs(obj_position[1] - self.init_left_pad[1]) - pad_success_margin
+ abs(obj_pos[1] - self.init_left_pad[1]) - pad_success_margin
)
right_caging = reward_utils.tolerance(
@@ -152,10 +163,9 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
assert y_caging >= 0 and y_caging <= 1
tcp_xz = tcp + np.array([0.0, -tcp[1], 0.0])
- obj_position_x_z = np.copy(obj_position) + np.array(
- [0.0, -obj_position[1], 0.0]
- )
+ obj_position_x_z = np.copy(obj_pos) + np.array([0.0, -obj_pos[1], 0.0])
tcp_obj_norm_x_z = np.linalg.norm(tcp_xz - obj_position_x_z, ord=2)
+ assert self.obj_init_pos is not None
init_obj_x_z = self.obj_init_pos + np.array([0.0, -self.obj_init_pos[1], 0.0])
init_tcp_x_z = self.init_tcp + np.array([0.0, -self.init_tcp[1], 0.0])
@@ -163,7 +173,7 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
np.linalg.norm(init_obj_x_z - init_tcp_x_z, ord=2) - x_z_success_margin
)
x_z_caging = reward_utils.tolerance(
- tcp_obj_norm_x_z,
+ float(tcp_obj_norm_x_z),
bounds=(0, x_z_success_margin),
margin=tcp_obj_x_z_margin,
sigmoid="long_tail",
@@ -186,15 +196,18 @@ def _gripper_caging_reward(self, action, obj_position, obj_radius):
return caging_and_gripping
- def compute_reward(self, action, obs):
- _TARGET_RADIUS = 0.05
+ def compute_reward(
+ self, action: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None
+ _TARGET_RADIUS: float = 0.05
tcp = self.tcp_center
obj = obs[4:7]
tcp_opened = obs[3]
target = self._target_pos
- obj_to_target = np.linalg.norm(obj - target)
- tcp_to_obj = np.linalg.norm(obj - tcp)
+ obj_to_target = float(np.linalg.norm(obj - target))
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
in_place_margin = np.linalg.norm(self.obj_init_pos - target)
in_place = reward_utils.tolerance(
@@ -213,4 +226,4 @@ def compute_reward(self, action, obs):
if obj_to_target < _TARGET_RADIUS:
reward = 10.0
- return [reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place]
+ return (reward, tcp_to_obj, tcp_opened, obj_to_target, object_grasped, in_place)
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_window_close_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_window_close_v2.py
index 41308be65..351af2d0e 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_window_close_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_window_close_v2.py
@@ -1,13 +1,15 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerWindowCloseEnvV2(SawyerXYZEnv):
@@ -23,9 +25,12 @@ class SawyerWindowCloseEnvV2(SawyerXYZEnv):
- (6/15/20) Increased max_path_length from 150 to 200
"""
- TARGET_RADIUS = 0.05
+ TARGET_RADIUS: float = 0.05
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
liftThresh = 0.02
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
@@ -33,15 +38,12 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
obj_high = (0.0, 0.9, 0.2)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
+ self.init_config: InitConfigDict = {
"obj_init_angle": 0.3,
"obj_init_pos": np.array([0.1, 0.785, 0.16], dtype=np.float32),
"hand_init_pos": np.array([0, 0.4, 0.2], dtype=np.float32),
@@ -56,20 +58,21 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
self.liftThresh = liftThresh
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.maxPullDist = 0.2
self.target_reward = 1000 * self.maxPullDist + 1000 * 2
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_window_horizontal.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -91,44 +94,45 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("handleCloseStart")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return np.zeros(4)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.prev_obs = self._get_curr_obs_combined_no_goal()
self.obj_init_pos = self._get_state_rand_vec()
self._target_pos = self.obj_init_pos.copy()
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "window")
- ] = self.obj_init_pos
+ self.model.body("window").pos = self.obj_init_pos
self.window_handle_pos_init = self._get_pos_objects() + np.array(
[0.2, 0.0, 0.0]
)
self.data.joint("window_slide").qpos = 0.2
- mujoco.mj_forward(self.model, self.data)
+ self.model.site("goal").pos = self._target_pos
return self._get_obs()
- def _reset_hand(self):
- super()._reset_hand()
+ def _reset_hand(self, steps: int = 50) -> None:
+ super()._reset_hand(steps=steps)
self.init_tcp = self.tcp_center
- def compute_reward(self, actions, obs):
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None
del actions
obj = self._get_pos_objects()
tcp = self.tcp_center
target = self._target_pos.copy()
- target_to_obj = obj[0] - target[0]
- target_to_obj = np.linalg.norm(target_to_obj)
+ target_to_obj: float = obj[0] - target[0]
+ target_to_obj = float(np.linalg.norm(target_to_obj))
target_to_obj_init = self.window_handle_pos_init[0] - target[0]
- target_to_obj_init = np.linalg.norm(target_to_obj_init)
+ target_to_obj_init = float(np.linalg.norm(target_to_obj_init))
in_place = reward_utils.tolerance(
target_to_obj,
@@ -138,8 +142,10 @@ def compute_reward(self, actions, obs):
)
handle_radius = 0.02
- tcp_to_obj = np.linalg.norm(obj - tcp)
- tcp_to_obj_init = np.linalg.norm(self.window_handle_pos_init - self.init_tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
+ tcp_to_obj_init = float(
+ np.linalg.norm(self.window_handle_pos_init - self.init_tcp)
+ )
reach = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, handle_radius),
@@ -147,7 +153,7 @@ def compute_reward(self, actions, obs):
sigmoid="gaussian",
)
# reward = reach
- tcp_opened = 0
+ tcp_opened = 0.0
object_grasped = reach
reward = 10 * reward_utils.hamacher_product(reach, in_place)
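# Illustrative sketch (not part of this patch): hamacher_product(a, b)
# combines the reach and in_place terms above as (a*b) / (a + b - a*b),
# a t-norm that is 0 whenever either term is 0 and 1 only when both are.
a, b = 0.9, 0.8
h = (a * b) / ((a + b) - (a * b))
assert abs(h - 0.72 / 0.98) < 1e-12
assert 0.0 <= h <= min(a, b)  # t-norm property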
diff --git a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_window_open_v2.py b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_window_open_v2.py
index 1d84ef514..85b377aaa 100644
--- a/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_window_open_v2.py
+++ b/metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_window_open_v2.py
@@ -1,13 +1,15 @@
-import mujoco
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from gymnasium.spaces import Box
-from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import (
- SawyerXYZEnv,
- _assert_task_is_set,
-)
+from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import RenderMode, SawyerXYZEnv
+from metaworld.envs.mujoco.utils import reward_utils
+from metaworld.types import InitConfigDict
class SawyerWindowOpenEnvV2(SawyerXYZEnv):
@@ -22,30 +24,25 @@ class SawyerWindowOpenEnvV2(SawyerXYZEnv):
- (6/15/20) Increased max_path_length from 150 to 200
"""
- TARGET_RADIUS = 0.05
+ TARGET_RADIUS: float = 0.05
- def __init__(self, render_mode=None, camera_name=None, camera_id=None):
+ def __init__(
+ self,
+ **render_kwargs: dict[str, Any] | None,
+ ) -> None:
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.7, 0.16)
obj_high = (0.1, 0.9, 0.16)
super().__init__(
- self.model_name,
hand_low=hand_low,
hand_high=hand_high,
- render_mode=render_mode,
- camera_name=camera_name,
- camera_id=camera_id,
+ **render_kwargs,
)
- self.init_config = {
- "obj_init_angle": np.array(
- [
- 0.3,
- ],
- dtype=np.float32,
- ),
+ self.init_config: InitConfigDict = {
+ "obj_init_angle": 0.3,
"obj_init_pos": np.array([-0.1, 0.785, 0.16], dtype=np.float32),
"hand_init_pos": np.array([0, 0.4, 0.2], dtype=np.float32),
}
@@ -57,20 +54,21 @@ def __init__(self, render_mode=None, camera_name=None, camera_id=None):
goal_high = self.hand_high
self._random_reset_space = Box(
- np.array(obj_low),
- np.array(obj_high),
+ np.array(obj_low), np.array(obj_high), dtype=np.float64
)
- self.goal_space = Box(np.array(goal_low), np.array(goal_high))
+ self.goal_space = Box(np.array(goal_low), np.array(goal_high), dtype=np.float64)
self.maxPullDist = 0.2
self.target_reward = 1000 * self.maxPullDist + 1000 * 2
@property
- def model_name(self):
+ def model_name(self) -> str:
return full_v2_path_for("sawyer_xyz/sawyer_window_horizontal.xml")
- @_assert_task_is_set
- def evaluate_state(self, obs, action):
+ @SawyerXYZEnv._Decorators.assert_task_is_set
+ def evaluate_state(
+ self, obs: npt.NDArray[np.float64], action: npt.NDArray[np.float32]
+ ) -> tuple[float, dict[str, Any]]:
(
reward,
tcp_to_obj,
@@ -92,38 +90,42 @@ def evaluate_state(self, obs, action):
return reward, info
- def _get_pos_objects(self):
+ def _get_pos_objects(self) -> npt.NDArray[Any]:
return self._get_site_pos("handleOpenStart")
- def _get_quat_objects(self):
+ def _get_quat_objects(self) -> npt.NDArray[Any]:
return np.zeros(4)
- def reset_model(self):
+ def reset_model(self) -> npt.NDArray[np.float64]:
self._reset_hand()
self.prev_obs = self._get_curr_obs_combined_no_goal()
self.obj_init_pos = self._get_state_rand_vec()
self._target_pos = self.obj_init_pos + np.array([0.2, 0.0, 0.0])
- self.model.body_pos[
- mujoco.mj_name2id(self.model, mujoco.mjtObj.mjOBJ_BODY, "window")
- ] = self.obj_init_pos
+ self.model.body("window").pos = self.obj_init_pos
self.window_handle_pos_init = self._get_pos_objects()
self.data.joint("window_slide").qpos = 0.0
- mujoco.mj_forward(self.model, self.data)
+ assert self._target_pos is not None
+
+ self.model.site("goal").pos = self._target_pos
+
return self._get_obs()
- def compute_reward(self, actions, obs):
+ def compute_reward(
+ self, actions: npt.NDArray[Any], obs: npt.NDArray[np.float64]
+ ) -> tuple[float, float, float, float, float, float]:
+ assert self._target_pos is not None and self.obj_init_pos is not None
del actions
obj = self._get_pos_objects()
tcp = self.tcp_center
target = self._target_pos.copy()
- target_to_obj = obj[0] - target[0]
- target_to_obj = np.linalg.norm(target_to_obj)
+ target_to_obj: float = obj[0] - target[0]
+ target_to_obj = float(np.linalg.norm(target_to_obj))
target_to_obj_init = self.obj_init_pos[0] - target[0]
- target_to_obj_init = np.linalg.norm(target_to_obj_init)
+ target_to_obj_init = float(np.linalg.norm(target_to_obj_init))
in_place = reward_utils.tolerance(
target_to_obj,
@@ -133,15 +135,17 @@ def compute_reward(self, actions, obs):
)
handle_radius = 0.02
- tcp_to_obj = np.linalg.norm(obj - tcp)
- tcp_to_obj_init = np.linalg.norm(self.window_handle_pos_init - self.init_tcp)
+ tcp_to_obj = float(np.linalg.norm(obj - tcp))
+ tcp_to_obj_init = float(
+ np.linalg.norm(self.window_handle_pos_init - self.init_tcp)
+ )
reach = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, handle_radius),
margin=abs(tcp_to_obj_init - handle_radius),
sigmoid="long_tail",
)
- tcp_opened = 0
+ tcp_opened = 0.0
object_grasped = reach
reward = 10 * reward_utils.hamacher_product(reach, in_place)
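# Illustrative sketch (not part of this patch): reset_model() above swaps the
# manual mj_name2id lookup for the named-accessor API of the official mujoco
# bindings; both forms address the same underlying array.
import mujoco

model = mujoco.MjModel.from_xml_string(
    "<mujoco><worldbody><body name='window' pos='0 0 1'/></worldbody></mujoco>"
)
body_id = mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_BODY, "window")
assert (model.body_pos[body_id] == model.body("window").pos).all()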
diff --git a/metaworld/envs/mujoco/sawyer_xyz/visual/__init__.py b/metaworld/envs/mujoco/utils/__init__.py
similarity index 100%
rename from metaworld/envs/mujoco/sawyer_xyz/visual/__init__.py
rename to metaworld/envs/mujoco/utils/__init__.py
diff --git a/metaworld/envs/reward_utils.py b/metaworld/envs/mujoco/utils/reward_utils.py
similarity index 61%
rename from metaworld/envs/reward_utils.py
rename to metaworld/envs/mujoco/utils/reward_utils.py
index affee8c35..f11b47563 100644
--- a/metaworld/envs/reward_utils.py
+++ b/metaworld/envs/mujoco/utils/reward_utils.py
@@ -1,21 +1,40 @@
"""A set of reward utilities written by the authors of dm_control."""
+from __future__ import annotations
+
+from typing import Any, Literal, TypeVar
import numpy as np
+import numpy.typing as npt
# The value returned by tolerance() at `margin` distance from `bounds` interval.
_DEFAULT_VALUE_AT_MARGIN = 0.1
-def _sigmoids(x, value_at_1, sigmoid):
- """Returns 1 when `x` == 0, between 0 and 1 otherwise.
+SIGMOID_TYPE = Literal[
+ "gaussian",
+ "hyperbolic",
+ "long_tail",
+ "reciprocal",
+ "cosine",
+ "linear",
+ "quadratic",
+ "tanh_squared",
+]
+
+X = TypeVar("X", float, npt.NDArray, np.floating)
+
+
+def _sigmoids(x: X, value_at_1: float, sigmoid: SIGMOID_TYPE) -> X:
+ """Maps the input to values between 0 and 1 using a specified sigmoid function. Returns 1 when the input is 0, between 0 and 1 otherwise.
Args:
- x: A scalar or numpy array.
- value_at_1: A float between 0 and 1 specifying the output when `x` == 1.
- sigmoid: String, choice of sigmoid type.
+ x: The input.
+ value_at_1: The output value when `x` == 1. Must be between 0 and 1.
+ sigmoid: Choice of sigmoid type. Valid values are 'gaussian', 'hyperbolic',
+ 'long_tail', 'reciprocal', 'cosine', 'linear', 'quadratic', 'tanh_squared'.
Returns:
- A numpy array with values between 0.0 and 1.0.
+ The input mapped to values between 0.0 and 1.0.
Raises:
ValueError: If not 0 < `value_at_1` < 1, except for `linear`, `cosine` and
@@ -25,14 +44,12 @@ def _sigmoids(x, value_at_1, sigmoid):
if sigmoid in ("cosine", "linear", "quadratic"):
if not 0 <= value_at_1 < 1:
raise ValueError(
- "`value_at_1` must be nonnegative and smaller than 1, "
- "got {}.".format(value_at_1)
+ f"`value_at_1` must be nonnegative and smaller than 1, got {value_at_1}."
)
else:
if not 0 < value_at_1 < 1:
raise ValueError(
- "`value_at_1` must be strictly between 0 and 1, "
- "got {}.".format(value_at_1)
+ f"`value_at_1` must be strictly between 0 and 1, got {value_at_1}."
)
if sigmoid == "gaussian":
@@ -54,17 +71,20 @@ def _sigmoids(x, value_at_1, sigmoid):
elif sigmoid == "cosine":
scale = np.arccos(2 * value_at_1 - 1) / np.pi
scaled_x = x * scale
- return np.where(abs(scaled_x) < 1, (1 + np.cos(np.pi * scaled_x)) / 2, 0.0)
+ ret = np.where(abs(scaled_x) < 1, (1 + np.cos(np.pi * scaled_x)) / 2, 0.0)
+ return ret.item() if np.isscalar(x) else ret
elif sigmoid == "linear":
scale = 1 - value_at_1
scaled_x = x * scale
- return np.where(abs(scaled_x) < 1, 1 - scaled_x, 0.0)
+ ret = np.where(abs(scaled_x) < 1, 1 - scaled_x, 0.0)
+ return ret.item() if np.isscalar(x) else ret
elif sigmoid == "quadratic":
scale = np.sqrt(1 - value_at_1)
scaled_x = x * scale
- return np.where(abs(scaled_x) < 1, 1 - scaled_x**2, 0.0)
+ ret = np.where(abs(scaled_x) < 1, 1 - scaled_x**2, 0.0)
+ return ret.item() if np.isscalar(x) else ret
elif sigmoid == "tanh_squared":
scale = np.arctanh(np.sqrt(1 - value_at_1))
@@ -75,29 +95,29 @@ def _sigmoids(x, value_at_1, sigmoid):
def tolerance(
- x,
- bounds=(0.0, 0.0),
- margin=0.0,
- sigmoid="gaussian",
- value_at_margin=_DEFAULT_VALUE_AT_MARGIN,
-):
+ x: X,
+ bounds: tuple[float, float] = (0.0, 0.0),
+ margin: float | np.floating[Any] = 0.0,
+ sigmoid: SIGMOID_TYPE = "gaussian",
+ value_at_margin: float = _DEFAULT_VALUE_AT_MARGIN,
+) -> X:
"""Returns 1 when `x` falls inside the bounds, between 0 and 1 otherwise.
Args:
- x: A scalar or numpy array.
+ x: The input.
bounds: A tuple of floats specifying inclusive `(lower, upper)` bounds for
the target interval. These can be infinite if the interval is unbounded
at one or both ends, or they can be equal to one another if the target
value is exact.
- margin: Float. Parameter that controls how steeply the output decreases as
+ margin: Parameter that controls how steeply the output decreases as
`x` moves out-of-bounds.
* If `margin == 0` then the output will be 0 for all values of `x`
outside of `bounds`.
* If `margin > 0` then the output will decrease sigmoidally with
increasing distance from the nearest bound.
- sigmoid: String, choice of sigmoid type. Valid values are: 'gaussian',
- 'linear', 'hyperbolic', 'long_tail', 'cosine', 'tanh_squared'.
- value_at_margin: A float between 0 and 1 specifying the output value when
+ sigmoid: Choice of sigmoid type. Valid values are 'gaussian', 'hyperbolic',
+ 'long_tail', 'reciprocal', 'cosine', 'linear', 'quadratic', 'tanh_squared'.
+ value_at_margin: A value between 0 and 1 specifying the output when
the distance from `x` to the nearest bound is equal to `margin`. Ignored
if `margin == 0`.
@@ -121,27 +141,32 @@ def tolerance(
d = np.where(x < lower, lower - x, x - upper) / margin
value = np.where(in_bounds, 1.0, _sigmoids(d, value_at_margin, sigmoid))
- return float(value) if np.isscalar(x) else value
+ return value.item() if np.isscalar(x) else value
-def inverse_tolerance(x, bounds=(0.0, 0.0), margin=0.0, sigmoid="reciprocal"):
+def inverse_tolerance(
+ x: X,
+ bounds: tuple[float, float] = (0.0, 0.0),
+ margin: float = 0.0,
+ sigmoid: SIGMOID_TYPE = "reciprocal",
+) -> X:
"""Returns 0 when `x` falls inside the bounds, between 1 and 0 otherwise.
Args:
- x: A scalar or numpy array.
+ x: The input.
bounds: A tuple of floats specifying inclusive `(lower, upper)` bounds for
the target interval. These can be infinite if the interval is unbounded
at one or both ends, or they can be equal to one another if the target
value is exact.
- margin: Float. Parameter that controls how steeply the output decreases as
+ margin: Parameter that controls how steeply the output decreases as
`x` moves out-of-bounds.
* If `margin == 0` then the output will be 0 for all values of `x`
outside of `bounds`.
* If `margin > 0` then the output will decrease sigmoidally with
increasing distance from the nearest bound.
- sigmoid: String, choice of sigmoid type. Valid values are: 'gaussian',
- 'linear', 'hyperbolic', 'long_tail', 'cosine', 'tanh_squared'.
- value_at_margin: A float between 0 and 1 specifying the output value when
+ sigmoid: Choice of sigmoid type. Valid values are 'gaussian', 'hyperbolic',
+ 'long_tail', 'reciprocal', 'cosine', 'linear', 'quadratic', 'tanh_squared'.
+ value_at_margin: A value between 0 and 1 specifying the output when
the distance from `x` to the nearest bound is equal to `margin`. Ignored
if `margin == 0`.
@@ -158,24 +183,22 @@ def inverse_tolerance(x, bounds=(0.0, 0.0), margin=0.0, sigmoid="reciprocal"):
return 1 - bound
-def rect_prism_tolerance(curr, zero, one):
+def rect_prism_tolerance(
+ curr: npt.NDArray[np.float_],
+ zero: npt.NDArray[np.float_],
+ one: npt.NDArray[np.float_],
+) -> float:
"""Computes a reward if curr is inside a rectangular prism region.
- The 3d points curr and zero specify 2 diagonal corners of a rectangular
- prism that represents the decreasing region.
-
- one represents the corner of the prism that has a reward of 1.
- zero represents the diagonal opposite corner of the prism that has a reward
- of 0.
- Curr is the point that the prism reward region is being applied for.
+ All inputs are 3D points with shape (3,).
Args:
- curr(np.ndarray): The point whose reward is being assessed.
- shape is (3,).
- zero(np.ndarray): One corner of the rectangular prism, with reward 0.
- shape is (3,)
- one(np.ndarray): The diagonal opposite corner of one, with reward 1.
- shape is (3,)
+ curr: The point that the prism reward region is being applied for.
+ zero: The diagonal opposite corner of the prism with reward 0.
+ one: The corner of the prism with reward 1.
+
+ Returns:
+ A reward if curr is inside the prism, 1.0 otherwise.
"""
def in_range(a, b, c):
@@ -192,25 +215,24 @@ def in_range(a, b, c):
y_scale = (curr[1] - zero[1]) / diff[1]
z_scale = (curr[2] - zero[2]) / diff[2]
return x_scale * y_scale * z_scale
- # return 0.01
else:
return 1.0
-def hamacher_product(a, b):
- """The hamacher (t-norm) product of a and b.
+def hamacher_product(a: float, b: float) -> float:
+ """Returns the hamacher (t-norm) product of a and b.
- computes (a * b) / ((a + b) - (a * b))
+ Computes (a * b) / ((a + b) - (a * b)).
Args:
- a (float): 1st term of hamacher product.
- b (float): 2nd term of hamacher product.
+ a: 1st term of the hamacher product.
+ b: 2nd term of the hamacher product.
+
+ Returns:
+ The hamacher product of a and b.
Raises:
ValueError: a and b must range between 0 and 1
-
- Returns:
- float: The hammacher product of a and b
"""
if not ((0.0 <= a <= 1.0) and (0.0 <= b <= 1.0)):
raise ValueError("a and b must range between 0 and 1")
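# Illustrative sketch (not part of this patch): tolerance() returns 1.0 inside
# `bounds` and decays toward 0 outside; at exactly `margin` distance from the
# nearest bound it returns `value_at_margin` (0.1 by default). Import path
# follows the rename in this patch.
from metaworld.envs.mujoco.utils import reward_utils

inside = reward_utils.tolerance(0.02, bounds=(0.0, 0.05), margin=0.1)
at_margin = reward_utils.tolerance(
    0.15, bounds=(0.0, 0.05), margin=0.1, sigmoid="long_tail"
)
assert inside == 1.0                # scalar in, scalar out (the X TypeVar)
assert abs(at_margin - 0.1) < 1e-6  # _DEFAULT_VALUE_AT_MARGIN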
diff --git a/metaworld/envs/mujoco/utils/rotation.py b/metaworld/envs/mujoco/utils/rotation.py
index 91a5e0717..58d81dcbf 100644
--- a/metaworld/envs/mujoco/utils/rotation.py
+++ b/metaworld/envs/mujoco/utils/rotation.py
@@ -24,13 +24,18 @@
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# Many methods borrow heavily or entirely from transforms3d:
-# https://github.com/matthew-brett/transforms3d
-# They have mostly been modified to support batched operations.
+"""Utilities for computing rotations in 3D space.
+
+Many methods borrow heavily or entirely from transforms3d: https://github.com/matthew-brett/transforms3d
+They have mostly been modified to support batched operations.
+"""
+from __future__ import annotations
import itertools
+from typing import Any
import numpy as np
+import numpy.typing as npt
"""
Rotations
@@ -98,10 +103,14 @@
_EPS4 = _FLOAT_EPS * 4.0
-def euler2mat(euler):
- """Convert Euler Angles to Rotation Matrix.
+def euler2mat(euler: npt.ArrayLike) -> npt.NDArray[np.float64]:
+ """Converts euler angles to rotation matrices.
+
+ Args:
+ euler: the euler angles. Can be batched and stored in any (nested) iterable.
- See rotation.py for notes.
+ Returns:
+ Rotation matrices corresponding to the euler angles, in double precision.
"""
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, f"Invalid shaped euler {euler}"
@@ -125,10 +134,14 @@ def euler2mat(euler):
return mat
-def euler2quat(euler):
- """Convert Euler Angles to Quaternions.
+def euler2quat(euler: npt.ArrayLike) -> npt.NDArray[np.float64]:
+ """Converts euler angles to quaternions.
- See rotation.py for notes.
+ Args:
+ euler: the euler angles. Can be batched and stored in any (nested) iterable.
+
+ Returns:
+ Quaternions corresponding to the euler angles, in double precision.
"""
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, f"Invalid shape euler {euler}"
@@ -147,10 +160,14 @@ def euler2quat(euler):
return quat
-def mat2euler(mat):
- """Convert Rotation Matrix to Euler Angles.
+def mat2euler(mat: npt.ArrayLike) -> npt.NDArray[np.float64]:
+ """Converts rotation matrices to euler angles.
+
+ Args:
+ mat: a 3D rotation matrix. Can be batched and stored in any (nested) iterable.
- See rotation.py for notes.
+ Returns:
+ Euler angles corresponding to the rotation matrices, in double precision.
"""
mat = np.asarray(mat, dtype=np.float64)
assert mat.shape[-2:] == (3, 3), f"Invalid shape matrix {mat}"
@@ -172,10 +189,14 @@ def mat2euler(mat):
return euler
-def mat2quat(mat):
- """Convert Rotation Matrix to Quaternion.
+def mat2quat(mat: npt.ArrayLike) -> npt.NDArray[np.float64]:
+ """Converts rotation matrices to quaternions.
- See rotation.py for notes.
+ Args:
+ mat: a 3D rotation matrix. Can be batched and stored in any (nested) iterable.
+
+ Returns:
+ Quaternions corresponding to the rotation matrices, in double precision.
"""
mat = np.asarray(mat, dtype=np.float64)
assert mat.shape[-2:] == (3, 3), f"Invalid shape matrix {mat}"
@@ -212,15 +233,30 @@ def mat2quat(mat):
return q
-def quat2euler(quat):
- """Convert Quaternion to Euler Angles.
+def quat2euler(quat: npt.ArrayLike) -> npt.NDArray[np.float64]:
+ """Converts quaternions to euler angles.
+
+ Args:
+ quat: the quaternion. Can be batched and stored in any (nested) iterable.
- See rotation.py for notes.
+ Returns:
+ Euler angles corresponding to the quaternions, in double precision.
"""
return mat2euler(quat2mat(quat))
-def subtract_euler(e1, e2):
+def subtract_euler(
+ e1: npt.NDArray[Any], e2: npt.NDArray[Any]
+) -> npt.NDArray[np.float64]:
+ """Subtracts two euler angles.
+
+ Args:
+ e1: the first euler angles. Can be batched.
+ e2: the second euler angles. Can be batched.
+
+ Returns:
+ Euler angles corresponding to the difference between e1 and e2, in double precision.
+ """
assert e1.shape == e2.shape
assert e1.shape[-1] == 3
q1 = euler2quat(e1)
@@ -229,10 +265,14 @@ def subtract_euler(e1, e2):
return quat2euler(q_diff)
-def quat2mat(quat):
- """Convert Quaternion to Euler Angles.
+def quat2mat(quat: npt.ArrayLike) -> npt.NDArray[np.float64]:
+ """Converts quaternions to rotation matrices.
+
+ Args:
+ quat: the quaternion. Can be batched and stored in any (nested) iterable.
- See rotation.py for notes.
+ Returns:
+ Rotation matrices corresponding to the quaternions, in double precision.
"""
quat = np.asarray(quat, dtype=np.float64)
assert quat.shape[-1] == 4, f"Invalid shape quat {quat}"
@@ -258,13 +298,30 @@ def quat2mat(quat):
return np.where((Nq > _FLOAT_EPS)[..., np.newaxis, np.newaxis], mat, np.eye(3))
-def quat_conjugate(q):
+def quat_conjugate(q: npt.NDArray[Any]) -> npt.NDArray[Any]:
+ """Returns the conjugate of a quaternion.
+
+ Args:
+ q: the quaternion. Can be batched.
+
+ Returns:
+ The conjugate of the quaternion.
+ """
inv_q = -q
inv_q[..., 0] *= -1
return inv_q
-def quat_mul(q0, q1):
+def quat_mul(q0: npt.NDArray[Any], q1: npt.NDArray[Any]) -> npt.NDArray[Any]:
+ """Multiplies two quaternions.
+
+ Args:
+ q0: the first quaternion. Can be batched.
+ q1: the second quaternion. Can be batched.
+
+ Returns:
+ The product of `q0` and `q1`.
+ """
assert q0.shape == q1.shape
assert q0.shape[-1] == 4
assert q1.shape[-1] == 4
@@ -290,19 +347,37 @@ def quat_mul(q0, q1):
return q
-def quat_rot_vec(q, v0):
+def quat_rot_vec(q: npt.NDArray[Any], v0: npt.NDArray[Any]) -> npt.NDArray[np.float64]:
+ """Rotates a vector by a quaternion.
+
+ Args:
+ q: the quaternion.
+ v0: the vector.
+
+ Returns:
+ The rotated vector.
+ """
q_v0 = np.array([0, v0[0], v0[1], v0[2]])
q_v = quat_mul(q, quat_mul(q_v0, quat_conjugate(q)))
v = q_v[1:]
return v
-def quat_identity():
+def quat_identity() -> npt.NDArray[np.int_]:
+ """Returns the identity quaternion."""
return np.array([1, 0, 0, 0])
-def quat2axisangle(quat):
- theta = 0
+def quat2axisangle(quat: npt.NDArray[Any]) -> tuple[npt.NDArray[Any], float]:
+ """Converts a quaternion to an axis-angle representation.
+
+ Args:
+ quat: the quaternion.
+
+ Returns:
+ The axis-angle representation of `quat` as an `(axis, angle)` tuple.
+ """
+ theta = 0.0
axis = np.array([0, 0, 1])
sin_theta = np.linalg.norm(quat[1:])
@@ -314,7 +389,15 @@ def quat2axisangle(quat):
return axis, theta
-def euler2point_euler(euler):
+def euler2point_euler(euler: npt.NDArray[Any]) -> npt.NDArray[Any]:
+ """Convert euler angles to 2D points on the unit circle for each one.
+
+ Args:
+ euler: the euler angles. Can optionally have 1 batch dimension.
+
+ Returns:
+ 2D points on the unit circle for each axis, returned as [`sin_x`, `sin_y`, `sin_z`, `cos_x`, `cos_y`, `cos_z`].
+ """
_euler = euler.copy()
if len(_euler.shape) < 2:
_euler = np.expand_dims(_euler, 0)
@@ -324,7 +407,16 @@ def euler2point_euler(euler):
return np.concatenate([_euler_sin, _euler_cos], axis=-1)
-def point_euler2euler(euler):
+def point_euler2euler(euler: npt.NDArray[Any]) -> npt.NDArray[Any]:
+ """Convert 2D points on the unit circle for each axis to euler angles.
+
+ Args:
+ euler: 2D points on the unit circle for each axis, stored as [`sin_x`, `sin_y`, `sin_z`, `cos_x`, `cos_y`, `cos_z`].
+ Can optionally have 1 batch dimension.
+
+ Returns:
+ The corresponding euler angles expressed as scalars.
+ """
_euler = euler.copy()
if len(_euler.shape) < 2:
_euler = np.expand_dims(_euler, 0)
@@ -334,7 +426,16 @@ def point_euler2euler(euler):
return angle
-def quat2point_quat(quat):
+def quat2point_quat(quat: npt.NDArray[Any]) -> npt.NDArray[Any]:
+ """Convert the quaternion's angle to 2D points on the unit circle for each axis in 3D space.
+
+ Args:
+ quat: the quaternion. Can optionally have 1 batch dimension.
+
+ Returns:
+ A quaternion with its angle expressed as 2D points on the unit circle for each axis in 3D space, returned as
+ [`sin_x`, `sin_y`, `sin_z`, `cos_x`, `cos_y`, `cos_z`, `quat_axis_x`, `quat_axis_y`, `quat_axis_z`].
+ """
# Should be in qw, qx, qy, qz
_quat = quat.copy()
if len(_quat.shape) < 2:
@@ -348,7 +449,17 @@ def quat2point_quat(quat):
return np.concatenate([np.sin(angle), np.cos(angle), xyz], axis=-1)
-def point_quat2quat(quat):
+def point_quat2quat(quat: npt.NDArray[Any]) -> npt.NDArray[Any]:
+ """Convert 2D points on the unit circle for each axis to quaternions.
+
+ Args:
+ quat: A quaternion with its angle expressed as 2D points on the unit circle for each axis in 3D space, stored as
+ [`sin_x`, `sin_y`, `sin_z`, `cos_x`, `cos_y`, `cos_z`, `quat_axis_x`, `quat_axis_y`, `quat_axis_z`].
+ Can optionally have 1 batch dimension.
+
+ Returns:
+ The quaternion with its angle expressed as a scalar.
+ """
_quat = quat.copy()
if len(_quat.shape) < 2:
_quat = np.expand_dims(_quat, 0)
@@ -363,7 +474,7 @@ def point_quat2quat(quat):
return np.concatenate([qw, qxyz], axis=-1)
-def normalize_angles(angles):
+def normalize_angles(angles: npt.NDArray[Any]) -> npt.NDArray[Any]:
"""Puts angles in [-pi, pi] range."""
angles = angles.copy()
if angles.size > 0:
@@ -372,15 +483,15 @@ def normalize_angles(angles):
return angles
-def round_to_straight_angles(angles):
+def round_to_straight_angles(angles: npt.NDArray[Any]) -> npt.NDArray[Any]:
"""Returns closest angle modulo 90 degrees."""
angles = np.round(angles / (np.pi / 2)) * (np.pi / 2)
return normalize_angles(angles)
-def get_parallel_rotations():
+def get_parallel_rotations() -> list[npt.NDArray[Any]]:
mult90 = [0, np.pi / 2, -np.pi / 2, np.pi]
- parallel_rotations = []
+ parallel_rotations: list[npt.NDArray] = []
for euler in itertools.product(mult90, repeat=3):
canonical = mat2euler(euler2mat(euler))
canonical = np.round(canonical / (np.pi / 2))
@@ -390,6 +501,6 @@ def get_parallel_rotations():
canonical[2] = 2
canonical *= np.pi / 2
if all([(canonical != rot).any() for rot in parallel_rotations]):
- parallel_rotations += [canonical]
+ parallel_rotations.append(canonical)
assert len(parallel_rotations) == 24
return parallel_rotations
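
The assertion documents the invariant: the rotational symmetry group of a cube has exactly 24 elements, and each is represented here as an euler triple whose components are multiples of pi/2:

    rots = get_parallel_rotations()
    assert len(rots) == 24
    # every rots[i] is an euler triple such as [0., pi/2, -pi/2]
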
diff --git a/metaworld/policies/action.py b/metaworld/policies/action.py
index c578f93d0..65e2c2ccf 100644
--- a/metaworld/policies/action.py
+++ b/metaworld/policies/action.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
class Action:
@@ -9,28 +14,26 @@ class Action:
available as an instance variable.
"""
- def __init__(self, structure):
+ def __init__(self, structure: dict[str, npt.NDArray[Any] | int]) -> None:
"""Action.
Args:
- structure (dict): Map from field names to output array indices
+ structure: Map from field names to output array indices
"""
self._structure = structure
self.array = np.zeros(len(self), dtype=np.float32)
- def __len__(self):
+ def __len__(self) -> int:
return sum(
-            [1 if isinstance(idx, int) else len(idx) for idx in self._structure.items()]
+            [1 if isinstance(idx, int) else len(idx) for idx in self._structure.values()]
)
- def __getitem__(self, key):
+    def __getitem__(self, key: str) -> npt.NDArray[np.float32]:
assert key in self._structure, (
"This action's structure does not contain %s" % key
)
return self.array[self._structure[key]]
- def __setitem__(self, key, value):
- assert key in self._structure, (
- "This action's structure does not contain %s" % key
- )
+    def __setitem__(self, key: str, value: Any) -> None:
+ assert key in self._structure, f"This action's structure does not contain {key}"
self.array[self._structure[key]] = value
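
The structure dict maps each field to either a single index or an array of indices into the flat action; every scripted policy below builds the same 4-dimensional action this way:

    import numpy as np
    from metaworld.policies.action import Action

    action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
    action["delta_pos"] = np.array([0.1, 0.0, -0.05])  # fills indices 0..2
    action["grab_effort"] = 0.7                        # fills index 3
    # action.array -> array([0.1, 0., -0.05, 0.7], dtype=float32)
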
diff --git a/metaworld/policies/policy.py b/metaworld/policies/policy.py
index 91c408f5b..4d76fd5b1 100644
--- a/metaworld/policies/policy.py
+++ b/metaworld/policies/policy.py
@@ -1,20 +1,26 @@
+from __future__ import annotations
+
import abc
import warnings
+from typing import Any, Callable
import numpy as np
+import numpy.typing as npt
-def assert_fully_parsed(func):
+def assert_fully_parsed(
+ func: Callable[[npt.NDArray[np.float64]], dict[str, npt.NDArray[np.float64]]]
+) -> Callable[[npt.NDArray[np.float64]], dict[str, npt.NDArray[np.float64]]]:
"""Decorator function to ensure observations are fully parsed.
Args:
- func (Callable): The function to check
+ func: The function to check
Returns:
- (Callable): The input function, decorated to assert full parsing
+ The input function, decorated to assert full parsing
"""
- def inner(obs):
+    def inner(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
obs_dict = func(obs)
assert len(obs) == sum(
[len(i) if isinstance(i, np.ndarray) else 1 for i in obs_dict.values()]
@@ -24,17 +30,18 @@ def inner(obs):
return inner
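
The decorator simply compares flattened sizes, so a parser cannot silently drop observation entries; a minimal sketch of what it catches:

    import numpy as np
    from metaworld.policies.policy import assert_fully_parsed

    @assert_fully_parsed
    def parse(obs):
        return {"hand_pos": obs[:3]}  # ignores everything past index 2

    parse(np.zeros(3))  # OK: all three entries accounted for
    parse(np.zeros(6))  # AssertionError: three entries were never parsed
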
-def move(from_xyz, to_xyz, p):
+def move(
+ from_xyz: npt.NDArray[Any], to_xyz: npt.NDArray[Any], p: float
+) -> npt.NDArray[Any]:
"""Computes action components that help move from 1 position to another.
Args:
- from_xyz (np.ndarray): The coordinates to move from (usually current position)
- to_xyz (np.ndarray): The coordinates to move to
- p (float): constant to scale response
+ from_xyz: The coordinates to move from (usually current position)
+ to_xyz: The coordinates to move to
+ p: constant to scale response
Returns:
- (np.ndarray): Response that will decrease abs(to_xyz - from_xyz)
-
+ Response that will decrease abs(to_xyz - from_xyz)
"""
error = to_xyz - from_xyz
response = p * error
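
`move` is a plain proportional controller, so the gain `p` trades responsiveness against overshoot; with the p=4.0 used by several policies below:

    import numpy as np
    from metaworld.policies.policy import move

    delta = move(np.zeros(3), np.array([0.1, -0.2, 0.0]), p=4.0)
    # delta == [0.4, -0.8, 0.0]; the hidden tail of this function (note the
    # `warnings` import above) presumably warns when a component leaves [-1, 1],
    # the range the environments clip actions to
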
@@ -47,27 +54,29 @@ def move(from_xyz, to_xyz, p):
class Policy(abc.ABC):
+ """Abstract base class for policies."""
+
@staticmethod
@abc.abstractmethod
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
"""Pulls pertinent information out of observation and places in a dict.
Args:
- obs (np.ndarray): Observation which conforms to env.observation_space
+ obs: Observation which conforms to env.observation_space
Returns:
dict: Dictionary which contains information from the observation
"""
- pass
+ raise NotImplementedError
@abc.abstractmethod
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
"""Gets an action in response to an observation.
Args:
- obs (np.ndarray): Observation which conforms to env.observation_space
+ obs: Observation which conforms to env.observation_space
Returns:
- np.ndarray: Array (usually 4 elements) representing the action to take
+ Array (usually 4 elements) representing the action to take
"""
- pass
+ raise NotImplementedError
diff --git a/metaworld/policies/sawyer_assembly_v1_policy.py b/metaworld/policies/sawyer_assembly_v1_policy.py
index 357b2e345..efe6a390d 100644
--- a/metaworld/policies/sawyer_assembly_v1_policy.py
+++ b/metaworld/policies/sawyer_assembly_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerAssemblyV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"wrench_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[6:9],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_wrench = o_d["wrench_pos"] + np.array([0.01, 0.0, 0.0])
pos_peg = o_d["peg_pos"] + np.array([0.07, 0.0, 0.15])
@@ -50,7 +55,7 @@ def _desired_pos(o_d):
return pos_peg
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_wrench = o_d["wrench_pos"] + np.array([0.01, 0.0, 0.0])
pos_peg = o_d["peg_pos"] + np.array([0.07, 0.0, 0.15])
diff --git a/metaworld/policies/sawyer_assembly_v2_policy.py b/metaworld/policies/sawyer_assembly_v2_policy.py
index 492f84686..4b5378ae6 100644
--- a/metaworld/policies/sawyer_assembly_v2_policy.py
+++ b/metaworld/policies/sawyer_assembly_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerAssemblyV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"unused_info": obs[7:-3],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_wrench = o_d["wrench_pos"] + np.array([-0.02, 0.0, 0.0])
pos_peg = o_d["peg_pos"] + np.array([0.12, 0.0, 0.14])
@@ -49,7 +54,7 @@ def _desired_pos(o_d):
return pos_peg
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_wrench = o_d["wrench_pos"] + np.array([-0.02, 0.0, 0.0])
# pos_peg = o_d["peg_pos"] + np.array([0.12, 0.0, 0.14])
diff --git a/metaworld/policies/sawyer_basketball_v1_policy.py b/metaworld/policies/sawyer_basketball_v1_policy.py
index 09bcd0969..67d4cc8cf 100644
--- a/metaworld/policies/sawyer_basketball_v1_policy.py
+++ b/metaworld/policies/sawyer_basketball_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerBasketballV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"ball_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[[6, 7, 8, 10, 11]],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_ball = o_d["ball_pos"] + np.array([0.0, 0.0, 0.01])
# X is given by hoop_pos
@@ -46,7 +51,7 @@ def _desired_pos(o_d):
return pos_hoop
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_ball = o_d["ball_pos"]
diff --git a/metaworld/policies/sawyer_basketball_v2_policy.py b/metaworld/policies/sawyer_basketball_v2_policy.py
index cd0cb9bb7..d2ebefc8f 100644
--- a/metaworld/policies/sawyer_basketball_v2_policy.py
+++ b/metaworld/policies/sawyer_basketball_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerBasketballV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -17,7 +22,7 @@ def _parse_obs(obs):
"unused_info": obs[7:-3],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
action["delta_pos"] = move(
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_ball = o_d["ball_pos"] + np.array([0.0, 0.0, 0.01])
# X is given by hoop_pos
@@ -45,7 +50,7 @@ def _desired_pos(o_d):
return pos_hoop
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_ball = o_d["ball_pos"]
if (
diff --git a/metaworld/policies/sawyer_bin_picking_v2_policy.py b/metaworld/policies/sawyer_bin_picking_v2_policy.py
index d1aec98a4..53464d96d 100644
--- a/metaworld/policies/sawyer_bin_picking_v2_policy.py
+++ b/metaworld/policies/sawyer_bin_picking_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerBinPickingV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"extra_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_cube = o_d["cube_pos"] + np.array([0.0, 0.0, 0.03])
pos_bin = np.array([0.12, 0.7, 0.02])
@@ -51,7 +56,7 @@ def _desired_pos(o_d):
return pos_bin
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_cube = o_d["cube_pos"] + np.array([0.0, 0.0, 0.03])
diff --git a/metaworld/policies/sawyer_box_close_v1_policy.py b/metaworld/policies/sawyer_box_close_v1_policy.py
index 0a26f0286..6d567a3b9 100644
--- a/metaworld/policies/sawyer_box_close_v1_policy.py
+++ b/metaworld/policies/sawyer_box_close_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerBoxCloseV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"lid_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"extra_info": obs[[6, 7, 8, 11]],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_lid = o_d["lid_pos"] + np.array([-0.04, 0.0, -0.06])
pos_box = np.array([*o_d["box_pos"], 0.15]) + np.array([-0.04, 0.0, 0.0])
@@ -47,7 +52,7 @@ def _desired_pos(o_d):
return pos_box
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["lid_pos"] + np.array([-0.04, 0.0, -0.06])
diff --git a/metaworld/policies/sawyer_box_close_v2_policy.py b/metaworld/policies/sawyer_box_close_v2_policy.py
index 45605068e..f4b967548 100644
--- a/metaworld/policies/sawyer_box_close_v2_policy.py
+++ b/metaworld/policies/sawyer_box_close_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerBoxCloseV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -17,7 +22,7 @@ def _parse_obs(obs):
"extra_info_2": obs[-1],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_lid = o_d["lid_pos"] + np.array([0.0, 0.0, +0.02])
pos_box = np.array([*o_d["box_pos"], 0.15]) + np.array([0.0, 0.0, 0.0])
@@ -48,7 +53,7 @@ def _desired_pos(o_d):
return pos_box
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_lid = o_d["lid_pos"] + np.array([0.0, 0.0, +0.02])
diff --git a/metaworld/policies/sawyer_button_press_topdown_v1_policy.py b/metaworld/policies/sawyer_button_press_topdown_v1_policy.py
index a36d7e71b..faca3b60c 100644
--- a/metaworld/policies/sawyer_button_press_topdown_v1_policy.py
+++ b/metaworld/policies/sawyer_button_press_topdown_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerButtonPressTopdownV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"button_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_pos"]
diff --git a/metaworld/policies/sawyer_button_press_topdown_v2_policy.py b/metaworld/policies/sawyer_button_press_topdown_v2_policy.py
index 0ff004868..d8a685c9a 100644
--- a/metaworld/policies/sawyer_button_press_topdown_v2_policy.py
+++ b/metaworld/policies/sawyer_button_press_topdown_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerButtonPressTopdownV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"hand_closed": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_pos"]
diff --git a/metaworld/policies/sawyer_button_press_topdown_wall_v1_policy.py b/metaworld/policies/sawyer_button_press_topdown_wall_v1_policy.py
index 6805fe311..5a93fe688 100644
--- a/metaworld/policies/sawyer_button_press_topdown_wall_v1_policy.py
+++ b/metaworld/policies/sawyer_button_press_topdown_wall_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerButtonPressTopdownWallV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"button_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_pos"] + np.array([0.0, -0.06, 0.0])
diff --git a/metaworld/policies/sawyer_button_press_topdown_wall_v2_policy.py b/metaworld/policies/sawyer_button_press_topdown_wall_v2_policy.py
index 4bfc77126..fddfb8d28 100644
--- a/metaworld/policies/sawyer_button_press_topdown_wall_v2_policy.py
+++ b/metaworld/policies/sawyer_button_press_topdown_wall_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerButtonPressTopdownWallV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"hand_closed": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_pos"] + np.array([0.0, -0.06, 0.0])
diff --git a/metaworld/policies/sawyer_button_press_v1_policy.py b/metaworld/policies/sawyer_button_press_v1_policy.py
index 8fcd3d9c4..baf1ac26d 100644
--- a/metaworld/policies/sawyer_button_press_v1_policy.py
+++ b/metaworld/policies/sawyer_button_press_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, move
@@ -6,25 +11,27 @@
class SawyerButtonPressV1Policy(Policy):
@staticmethod
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"button_start_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
- action["delta_pos"] = move(o_d["hand_pos"], to_xyz=self.desired_pos(o_d), p=4.0)
+ action["delta_pos"] = move(
+ o_d["hand_pos"], to_xyz=self._desired_pos(o_d), p=4.0
+ )
action["grab_effort"] = 0.0
return action.array
@staticmethod
- def desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_start_pos"] + np.array([0.0, 0.0, -0.07])
diff --git a/metaworld/policies/sawyer_button_press_v2_policy.py b/metaworld/policies/sawyer_button_press_v2_policy.py
index 55e9d01ed..82d7e6548 100644
--- a/metaworld/policies/sawyer_button_press_v2_policy.py
+++ b/metaworld/policies/sawyer_button_press_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, move
@@ -6,7 +11,7 @@
class SawyerButtonPressV2Policy(Policy):
@staticmethod
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"hand_closed": obs[3],
@@ -14,20 +19,20 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
action["delta_pos"] = move(
- o_d["hand_pos"], to_xyz=self.desired_pos(o_d), p=25.0
+ o_d["hand_pos"], to_xyz=self._desired_pos(o_d), p=25.0
)
action["grab_effort"] = 0.0
return action.array
@staticmethod
- def desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_pos"] + np.array([0.0, 0.0, -0.07])
diff --git a/metaworld/policies/sawyer_button_press_wall_v1_policy.py b/metaworld/policies/sawyer_button_press_wall_v1_policy.py
index fa9748cdf..f0ed3ff30 100644
--- a/metaworld/policies/sawyer_button_press_wall_v1_policy.py
+++ b/metaworld/policies/sawyer_button_press_wall_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, move
@@ -6,14 +11,14 @@
class SawyerButtonPressWallV1Policy(Policy):
@staticmethod
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"button_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -26,7 +31,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_pos"] + np.array([0.0, 0.0, 0.04])
@@ -40,7 +45,7 @@ def _desired_pos(o_d):
return pos_button + np.array([0.0, -0.02, 0.0])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_pos"] + np.array([0.0, 0.0, 0.04])
diff --git a/metaworld/policies/sawyer_button_press_wall_v2_policy.py b/metaworld/policies/sawyer_button_press_wall_v2_policy.py
index c254b7ad1..16635379d 100644
--- a/metaworld/policies/sawyer_button_press_wall_v2_policy.py
+++ b/metaworld/policies/sawyer_button_press_wall_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, move
@@ -6,7 +11,7 @@
class SawyerButtonPressWallV2Policy(Policy):
@staticmethod
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"hand_closed": obs[3],
@@ -14,7 +19,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_pos"] + np.array([0.0, 0.0, 0.04])
@@ -41,7 +46,7 @@ def _desired_pos(o_d):
return pos_button + np.array([0.0, -0.02, 0.0])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_pos"] + np.array([0.0, 0.0, 0.04])
diff --git a/metaworld/policies/sawyer_coffee_button_v1_policy.py b/metaworld/policies/sawyer_coffee_button_v1_policy.py
index 4764dbdcb..6925f8efa 100644
--- a/metaworld/policies/sawyer_coffee_button_v1_policy.py
+++ b/metaworld/policies/sawyer_coffee_button_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerCoffeeButtonV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"mug_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_mug = o_d["mug_pos"] + np.array([0.0, 0.0, 0.01])
diff --git a/metaworld/policies/sawyer_coffee_button_v2_policy.py b/metaworld/policies/sawyer_coffee_button_v2_policy.py
index 9142f5afd..3a451961e 100644
--- a/metaworld/policies/sawyer_coffee_button_v2_policy.py
+++ b/metaworld/policies/sawyer_coffee_button_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerCoffeeButtonV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["button_pos"] + np.array([0.0, 0.0, -0.07])
diff --git a/metaworld/policies/sawyer_coffee_pull_v1_policy.py b/metaworld/policies/sawyer_coffee_pull_v1_policy.py
index 94bfc0e2e..9361b7044 100644
--- a/metaworld/policies/sawyer_coffee_pull_v1_policy.py
+++ b/metaworld/policies/sawyer_coffee_pull_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerCoffeePullV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"mug_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_mug = o_d["mug_pos"]
@@ -41,7 +46,7 @@ def _desired_pos(o_d):
return np.array([pos_curr[0] - 0.1, 0.62, 0.1])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_mug = o_d["mug_pos"]
diff --git a/metaworld/policies/sawyer_coffee_pull_v2_policy.py b/metaworld/policies/sawyer_coffee_pull_v2_policy.py
index 6852c426b..6a812b9bc 100644
--- a/metaworld/policies/sawyer_coffee_pull_v2_policy.py
+++ b/metaworld/policies/sawyer_coffee_pull_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerCoffeePullV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"target_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_mug = o_d["mug_pos"] + np.array([-0.005, 0.0, 0.05])
@@ -41,7 +46,7 @@ def _desired_pos(o_d):
return o_d["target_pos"]
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_mug = o_d["mug_pos"] + np.array([0.01, 0.0, 0.05])
diff --git a/metaworld/policies/sawyer_coffee_push_v1_policy.py b/metaworld/policies/sawyer_coffee_push_v1_policy.py
index 251a781d3..1627056b6 100644
--- a/metaworld/policies/sawyer_coffee_push_v1_policy.py
+++ b/metaworld/policies/sawyer_coffee_push_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerCoffeePushV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"mug_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[[6, 7, 8, 11]],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_mug = o_d["mug_pos"] + np.array([0.0, 0.0, 0.01])
pos_goal = o_d["goal_xy"]
@@ -41,7 +46,7 @@ def _desired_pos(o_d):
return np.array([pos_goal[0], pos_goal[1], 0.1])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_mug = o_d["mug_pos"]
diff --git a/metaworld/policies/sawyer_coffee_push_v2_policy.py b/metaworld/policies/sawyer_coffee_push_v2_policy.py
index d029458a4..dbc8c645a 100644
--- a/metaworld/policies/sawyer_coffee_push_v2_policy.py
+++ b/metaworld/policies/sawyer_coffee_push_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerCoffeePushV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -17,7 +22,7 @@ def _parse_obs(obs):
"unused_info_2": obs[-1],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -30,7 +35,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_mug = o_d["mug_pos"] + np.array([0.01, 0.0, 0.05])
pos_goal = o_d["goal_xy"]
@@ -43,7 +48,7 @@ def _desired_pos(o_d):
return np.array([pos_goal[0], pos_goal[1], 0.1])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_mug = o_d["mug_pos"] + np.array([0.01, 0.0, 0.05])
diff --git a/metaworld/policies/sawyer_dial_turn_v1_policy.py b/metaworld/policies/sawyer_dial_turn_v1_policy.py
index e2510aebd..95ee4af17 100644
--- a/metaworld/policies/sawyer_dial_turn_v1_policy.py
+++ b/metaworld/policies/sawyer_dial_turn_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,27 +12,27 @@
class SawyerDialTurnV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"dial_pos": obs[3:6],
"goal_pos": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_pow": 3})
action["delta_pos"] = move(
- o_d["hand_pos"], to_xyz=self._desired_xyz(o_d), p=5.0
+ o_d["hand_pos"], to_xyz=self._desired_pos(o_d), p=5.0
)
action["grab_pow"] = 0.0
return action.array
@staticmethod
- def _desired_xyz(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
hand_pos = o_d["hand_pos"]
dial_pos = o_d["dial_pos"] + np.array([0.0, -0.028, 0.0])
if abs(hand_pos[2] - dial_pos[2]) > 0.02:
diff --git a/metaworld/policies/sawyer_dial_turn_v2_policy.py b/metaworld/policies/sawyer_dial_turn_v2_policy.py
index 535da0c40..096408565 100644
--- a/metaworld/policies/sawyer_dial_turn_v2_policy.py
+++ b/metaworld/policies/sawyer_dial_turn_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerDialTurnV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_gripper_open": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"extra_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_pow": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
hand_pos = o_d["hand_pos"]
dial_pos = o_d["dial_pos"] + np.array([0.05, 0.02, 0.09])
diff --git a/metaworld/policies/sawyer_disassemble_v1_policy.py b/metaworld/policies/sawyer_disassemble_v1_policy.py
index 7aaa2c008..b15c28926 100644
--- a/metaworld/policies/sawyer_disassemble_v1_policy.py
+++ b/metaworld/policies/sawyer_disassemble_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerDisassembleV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"wrench_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[6:9],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_wrench = o_d["wrench_pos"] + np.array([0.01, -0.01, 0.01])
pos_peg = o_d["peg_pos"] + np.array([0.07, 0.0, 0.15])
@@ -47,7 +52,7 @@ def _desired_pos(o_d):
return pos_curr + np.array([0.0, -0.1, 0.0])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_wrench = o_d["wrench_pos"] + np.array([0.01, 0.0, 0.0])
diff --git a/metaworld/policies/sawyer_disassemble_v2_policy.py b/metaworld/policies/sawyer_disassemble_v2_policy.py
index c5e892a77..bdc9e397d 100644
--- a/metaworld/policies/sawyer_disassemble_v2_policy.py
+++ b/metaworld/policies/sawyer_disassemble_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerDisassembleV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"unused_info": obs[7:-3],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_wrench = o_d["wrench_pos"] + np.array([-0.02, 0.0, 0.01])
# pos_peg = o_d["peg_pos"] + np.array([0.12, 0.0, 0.14])
@@ -45,7 +50,7 @@ def _desired_pos(o_d):
return pos_curr + np.array([0.0, 0.0, 0.1])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_wrench = o_d["wrench_pos"] + np.array([-0.02, 0.0, 0.01])
diff --git a/metaworld/policies/sawyer_door_close_v1_policy.py b/metaworld/policies/sawyer_door_close_v1_policy.py
index e1cce9b86..984b20940 100644
--- a/metaworld/policies/sawyer_door_close_v1_policy.py
+++ b/metaworld/policies/sawyer_door_close_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerDoorCloseV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"door_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_door = o_d["door_pos"]
pos_door += np.array([0.13, 0.1, 0.02])
diff --git a/metaworld/policies/sawyer_door_close_v2_policy.py b/metaworld/policies/sawyer_door_close_v2_policy.py
index 619a17c52..9b6997b63 100644
--- a/metaworld/policies/sawyer_door_close_v2_policy.py
+++ b/metaworld/policies/sawyer_door_close_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerDoorCloseV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_door = o_d["door_pos"]
pos_door += np.array([0.05, 0.12, 0.1])
diff --git a/metaworld/policies/sawyer_door_lock_v1_policy.py b/metaworld/policies/sawyer_door_lock_v1_policy.py
index f1c685e72..2da5e6151 100644
--- a/metaworld/policies/sawyer_door_lock_v1_policy.py
+++ b/metaworld/policies/sawyer_door_lock_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerDoorLockV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"lock_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_lock = o_d["lock_pos"] + np.array([0.0, -0.05, 0.0])
diff --git a/metaworld/policies/sawyer_door_lock_v2_policy.py b/metaworld/policies/sawyer_door_lock_v2_policy.py
index e8840b082..546d1f26f 100644
--- a/metaworld/policies/sawyer_door_lock_v2_policy.py
+++ b/metaworld/policies/sawyer_door_lock_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerDoorLockV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_lock = o_d["lock_pos"] + np.array([-0.02, -0.02, 0.0])
diff --git a/metaworld/policies/sawyer_door_open_v1_policy.py b/metaworld/policies/sawyer_door_open_v1_policy.py
index 0f74cd934..39596b777 100644
--- a/metaworld/policies/sawyer_door_open_v1_policy.py
+++ b/metaworld/policies/sawyer_door_open_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerDoorOpenV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"door_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_door = o_d["door_pos"]
pos_door[0] -= 0.05
diff --git a/metaworld/policies/sawyer_door_open_v2_policy.py b/metaworld/policies/sawyer_door_open_v2_policy.py
index ca82da068..4771e3f79 100644
--- a/metaworld/policies/sawyer_door_open_v2_policy.py
+++ b/metaworld/policies/sawyer_door_open_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerDoorOpenV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_door = o_d["door_pos"]
pos_door[0] -= 0.05
diff --git a/metaworld/policies/sawyer_door_unlock_v1_policy.py b/metaworld/policies/sawyer_door_unlock_v1_policy.py
index 2fa3f92d2..f33cc5122 100644
--- a/metaworld/policies/sawyer_door_unlock_v1_policy.py
+++ b/metaworld/policies/sawyer_door_unlock_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerDoorUnlockV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"lock_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_lock = o_d["lock_pos"] + np.array([-0.03, -0.03, -0.1])
diff --git a/metaworld/policies/sawyer_door_unlock_v2_policy.py b/metaworld/policies/sawyer_door_unlock_v2_policy.py
index a3d3cbb18..eb8fe650c 100644
--- a/metaworld/policies/sawyer_door_unlock_v2_policy.py
+++ b/metaworld/policies/sawyer_door_unlock_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerDoorUnlockV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_lock = o_d["lock_pos"] + np.array([-0.04, -0.02, -0.03])
diff --git a/metaworld/policies/sawyer_drawer_close_v1_policy.py b/metaworld/policies/sawyer_drawer_close_v1_policy.py
index 59f015570..63fd468b5 100644
--- a/metaworld/policies/sawyer_drawer_close_v1_policy.py
+++ b/metaworld/policies/sawyer_drawer_close_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerDrawerCloseV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"drwr_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_drwr = o_d["drwr_pos"]
diff --git a/metaworld/policies/sawyer_drawer_close_v2_policy.py b/metaworld/policies/sawyer_drawer_close_v2_policy.py
index 5c6734ff9..fa212dc0a 100644
--- a/metaworld/policies/sawyer_drawer_close_v2_policy.py
+++ b/metaworld/policies/sawyer_drawer_close_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerDrawerCloseV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_grasp_info": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_drwr = o_d["drwr_pos"] + np.array([0.0, 0.0, -0.02])
diff --git a/metaworld/policies/sawyer_drawer_open_v1_policy.py b/metaworld/policies/sawyer_drawer_open_v1_policy.py
index 2ecdafab1..b5240245b 100644
--- a/metaworld/policies/sawyer_drawer_open_v1_policy.py
+++ b/metaworld/policies/sawyer_drawer_open_v1_policy.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +10,14 @@
class SawyerDrawerOpenV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"drwr_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
diff --git a/metaworld/policies/sawyer_drawer_open_v2_policy.py b/metaworld/policies/sawyer_drawer_open_v2_policy.py
index 4cac540b9..9e7a519c8 100644
--- a/metaworld/policies/sawyer_drawer_open_v2_policy.py
+++ b/metaworld/policies/sawyer_drawer_open_v2_policy.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +10,7 @@
class SawyerDrawerOpenV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -15,7 +18,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
diff --git a/metaworld/policies/sawyer_faucet_close_v1_policy.py b/metaworld/policies/sawyer_faucet_close_v1_policy.py
index 301324393..19058e007 100644
--- a/metaworld/policies/sawyer_faucet_close_v1_policy.py
+++ b/metaworld/policies/sawyer_faucet_close_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerFaucetCloseV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"faucet_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_faucet = o_d["faucet_pos"] + np.array([0.02, 0.0, 0.0])
diff --git a/metaworld/policies/sawyer_faucet_close_v2_policy.py b/metaworld/policies/sawyer_faucet_close_v2_policy.py
index 2ed500f51..8367723e7 100644
--- a/metaworld/policies/sawyer_faucet_close_v2_policy.py
+++ b/metaworld/policies/sawyer_faucet_close_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerFaucetCloseV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_faucet = o_d["faucet_pos"] + np.array([+0.04, 0.0, 0.03])
diff --git a/metaworld/policies/sawyer_faucet_open_v1_policy.py b/metaworld/policies/sawyer_faucet_open_v1_policy.py
index efcc99d59..72004d27b 100644
--- a/metaworld/policies/sawyer_faucet_open_v1_policy.py
+++ b/metaworld/policies/sawyer_faucet_open_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerFaucetOpenV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"faucet_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_faucet = o_d["faucet_pos"] + np.array([-0.02, 0.0, 0.0])
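
Each policy computes its positional action as `move(o_d["hand_pos"], to_xyz=..., p=...)`. Judging only from these call sites, `move` behaves like a proportional controller toward a target waypoint; a sketch under that assumption (the real `metaworld.policies.policy.move` may differ, e.g. in whether it clips):

```python
import numpy as np
import numpy.typing as npt


def move(
    from_xyz: npt.NDArray[np.float64], to_xyz: npt.NDArray[np.float64], p: float
) -> npt.NDArray[np.float64]:
    """Proportional step toward a target, clipped to the [-1, 1] action range."""
    return np.clip(p * (to_xyz - from_xyz), -1.0, 1.0)


print(move(np.zeros(3), np.array([0.0, 0.05, 0.5]), p=10.0))  # [0.  0.5 1. ]
```
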
diff --git a/metaworld/policies/sawyer_faucet_open_v2_policy.py b/metaworld/policies/sawyer_faucet_open_v2_policy.py
index 58ea520b0..07fd883b0 100644
--- a/metaworld/policies/sawyer_faucet_open_v2_policy.py
+++ b/metaworld/policies/sawyer_faucet_open_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerFaucetOpenV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_faucet = o_d["faucet_pos"] + np.array([-0.04, 0.0, 0.03])
diff --git a/metaworld/policies/sawyer_hammer_v1_policy.py b/metaworld/policies/sawyer_hammer_v1_policy.py
index 0f2d206e2..0d1661557 100644
--- a/metaworld/policies/sawyer_hammer_v1_policy.py
+++ b/metaworld/policies/sawyer_hammer_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerHammerV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"hammer_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["hammer_pos"] + np.array([-0.08, 0.0, -0.01])
pos_goal = np.array([0.24, 0.71, 0.11]) + np.array([-0.19, 0.0, 0.05])
@@ -46,7 +51,7 @@ def _desired_pos(o_d):
return pos_goal
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["hammer_pos"] + np.array([-0.08, 0.0, -0.01])
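
The hammer hunks show the shape shared by most `_desired_pos` implementations, now annotated `-> npt.NDArray[Any]`: a cascade of distance checks that emits the next waypoint (align, descend, carry), with the companion `_grab_effort` collapsing to a plain `float`. An illustrative reduction, with thresholds invented for the demo rather than taken from the hammer policy:

```python
from __future__ import annotations

import numpy as np
import numpy.typing as npt


def desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[np.float64]:
    hand, obj, goal = o_d["hand_pos"], o_d["obj_pos"], o_d["goal_pos"]
    if np.linalg.norm(hand[:2] - obj[:2]) > 0.04:
        return np.array([obj[0], obj[1], 0.2])   # stage 1: hover above the object
    if abs(hand[2] - obj[2]) > 0.02:
        return obj                               # stage 2: descend to grasp height
    return goal                                  # stage 3: carry toward the goal


o_d = {
    "hand_pos": np.zeros(3),
    "obj_pos": np.array([0.1, 0.6, 0.02]),
    "goal_pos": np.array([0.24, 0.71, 0.11]),
}
print(desired_pos(o_d))  # [0.1 0.6 0.2] -- still in the hover stage
```
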
diff --git a/metaworld/policies/sawyer_hammer_v2_policy.py b/metaworld/policies/sawyer_hammer_v2_policy.py
index 707c95e52..98d484aed 100644
--- a/metaworld/policies/sawyer_hammer_v2_policy.py
+++ b/metaworld/policies/sawyer_hammer_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerHammerV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["hammer_pos"] + np.array([-0.04, 0.0, -0.01])
pos_goal = np.array([0.24, 0.71, 0.11]) + np.array([-0.19, 0.0, 0.05])
@@ -46,7 +51,7 @@ def _desired_pos(o_d):
return pos_goal
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["hammer_pos"] + np.array([-0.04, 0.0, -0.01])
diff --git a/metaworld/policies/sawyer_hand_insert_v1_policy.py b/metaworld/policies/sawyer_hand_insert_v1_policy.py
index d63e89015..3b3d75a64 100644
--- a/metaworld/policies/sawyer_hand_insert_v1_policy.py
+++ b/metaworld/policies/sawyer_hand_insert_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerHandInsertV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"obj_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[6:9],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
hand_pos = o_d["hand_pos"]
obj_pos = o_d["obj_pos"]
goal_pos = o_d["goal_pos"]
@@ -46,7 +51,7 @@ def _desired_pos(o_d):
return goal_pos
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
hand_pos = o_d["hand_pos"]
obj_pos = o_d["obj_pos"]
diff --git a/metaworld/policies/sawyer_hand_insert_v2_policy.py b/metaworld/policies/sawyer_hand_insert_v2_policy.py
index 44e03b528..8037598ac 100644
--- a/metaworld/policies/sawyer_hand_insert_v2_policy.py
+++ b/metaworld/policies/sawyer_hand_insert_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerHandInsertV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"unused_info": obs[7:-3],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
hand_pos = o_d["hand_pos"]
obj_pos = o_d["obj_pos"]
goal_pos = o_d["goal_pos"]
@@ -47,7 +52,7 @@ def _desired_pos(o_d):
return goal_pos
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
hand_pos = o_d["hand_pos"]
obj_pos = o_d["obj_pos"]
if (
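
The `_parse_obs` methods keep explicit `unused_*` keys because of the `@assert_fully_parsed` decorator: every element of the observation must be accounted for. A sketch of the contract it appears to enforce, inferred from these parsers rather than copied from the helper's source:

```python
from __future__ import annotations

import functools
from typing import Any, Callable

import numpy as np
import numpy.typing as npt


def assert_fully_parsed(
    func: Callable[[npt.NDArray[np.float64]], dict[str, Any]]
) -> Callable[[npt.NDArray[np.float64]], dict[str, Any]]:
    @functools.wraps(func)
    def inner(obs: npt.NDArray[np.float64]) -> dict[str, Any]:
        obs_dict = func(obs)
        # Count elements covered: array values by size, scalar values as 1.
        parsed = sum(
            v.size if isinstance(v, np.ndarray) else 1 for v in obs_dict.values()
        )
        assert len(obs) == parsed, "Observation not fully parsed"
        return obs_dict

    return inner


obs = np.zeros(10)
parse = assert_fully_parsed(lambda o: {"a": o[:3], "b": o[3], "c": o[4:]})
print(parse(obs)["c"].shape)  # (6,)
```
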
diff --git a/metaworld/policies/sawyer_handle_press_side_v2_policy.py b/metaworld/policies/sawyer_handle_press_side_v2_policy.py
index 565748629..5cd684b2e 100644
--- a/metaworld/policies/sawyer_handle_press_side_v2_policy.py
+++ b/metaworld/policies/sawyer_handle_press_side_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerHandlePressSideV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["handle_pos"]
diff --git a/metaworld/policies/sawyer_handle_press_v1_policy.py b/metaworld/policies/sawyer_handle_press_v1_policy.py
index f4a8ef494..b4981d5e1 100644
--- a/metaworld/policies/sawyer_handle_press_v1_policy.py
+++ b/metaworld/policies/sawyer_handle_press_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerHandlePressV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"handle_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["handle_pos"] + np.array([0.0, -0.02, 0.0])
diff --git a/metaworld/policies/sawyer_handle_press_v2_policy.py b/metaworld/policies/sawyer_handle_press_v2_policy.py
index 0d1686953..657e628b5 100644
--- a/metaworld/policies/sawyer_handle_press_v2_policy.py
+++ b/metaworld/policies/sawyer_handle_press_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerHandlePressV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["handle_pos"] + np.array([0.0, -0.02, 0.0])
diff --git a/metaworld/policies/sawyer_handle_pull_side_v1_policy.py b/metaworld/policies/sawyer_handle_pull_side_v1_policy.py
index fd08c3f74..41c533009 100644
--- a/metaworld/policies/sawyer_handle_pull_side_v1_policy.py
+++ b/metaworld/policies/sawyer_handle_pull_side_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerHandlePullSideV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"handle_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["handle_pos"] + np.array([0.02, 0.0, 0.0])
diff --git a/metaworld/policies/sawyer_handle_pull_side_v2_policy.py b/metaworld/policies/sawyer_handle_pull_side_v2_policy.py
index 24ab35282..a8855de97 100644
--- a/metaworld/policies/sawyer_handle_pull_side_v2_policy.py
+++ b/metaworld/policies/sawyer_handle_pull_side_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerHandlePullSideV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"handle_pos": obs[4:7],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_handle = o_d["handle_pos"]
if np.linalg.norm(pos_curr[:2] - pos_handle[:2]) > 0.04:
@@ -37,7 +42,7 @@ def _desired_pos(o_d):
return pos_handle + np.array([0.0, 0.0, 1.0])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_handle = o_d["handle_pos"]
if (
diff --git a/metaworld/policies/sawyer_handle_pull_v1_policy.py b/metaworld/policies/sawyer_handle_pull_v1_policy.py
index 544a7098b..9ca778596 100644
--- a/metaworld/policies/sawyer_handle_pull_v1_policy.py
+++ b/metaworld/policies/sawyer_handle_pull_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerHandlePullV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"handle_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_button = o_d["handle_pos"] + np.array([0.0, -0.02, 0.0])
diff --git a/metaworld/policies/sawyer_handle_pull_v2_policy.py b/metaworld/policies/sawyer_handle_pull_v2_policy.py
index 70d341b40..903d84862 100644
--- a/metaworld/policies/sawyer_handle_pull_v2_policy.py
+++ b/metaworld/policies/sawyer_handle_pull_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerHandlePullV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"handle_pos": obs[4:7],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_handle = o_d["handle_pos"] + np.array([0, -0.04, 0])
@@ -38,5 +43,5 @@ def _desired_pos(o_d):
return pos_handle + np.array([0.0, 0.0, 0.1])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
return 1.0
diff --git a/metaworld/policies/sawyer_lever_pull_v2_policy.py b/metaworld/policies/sawyer_lever_pull_v2_policy.py
index 9a76aea2d..cf05ea937 100644
--- a/metaworld/policies/sawyer_lever_pull_v2_policy.py
+++ b/metaworld/policies/sawyer_lever_pull_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerLeverPullV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_lever = o_d["lever_pos"] + np.array([0.0, -0.055, 0.0])
diff --git a/metaworld/policies/sawyer_peg_insertion_side_v2_policy.py b/metaworld/policies/sawyer_peg_insertion_side_v2_policy.py
index 6c2d9f655..6dbdde980 100644
--- a/metaworld/policies/sawyer_peg_insertion_side_v2_policy.py
+++ b/metaworld/policies/sawyer_peg_insertion_side_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPegInsertionSideV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper_distance_apart": obs[3],
@@ -18,7 +23,7 @@ def _parse_obs(obs):
"_prev_obs": obs[18:36],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -31,7 +36,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_peg = o_d["peg_pos"]
# lowest X is -.35, doesn't matter if we overshoot
@@ -49,7 +54,7 @@ def _desired_pos(o_d):
return pos_hole
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_peg = o_d["peg_pos"]
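
The "lowest X is -.35, doesn't matter if we overshoot" comment in the peg-insertion hunk relies on action saturation: with proportional control whose output is clipped to the action range, a target set past a physical limit only pins the action at its bound. A tiny demonstration (the gain is illustrative, not the policy's constant):

```python
import numpy as np

p = 10.0                                   # illustrative gain
error = np.array([-0.6, 0.0, 0.0])         # aiming well beyond the limit
print(np.clip(p * error, -1.0, 1.0))       # [-1.  0.  0.] -- saturated, not broken
```
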
diff --git a/metaworld/policies/sawyer_peg_unplug_side_v1_policy.py b/metaworld/policies/sawyer_peg_unplug_side_v1_policy.py
index e12f4c375..b929b7f1e 100644
--- a/metaworld/policies/sawyer_peg_unplug_side_v1_policy.py
+++ b/metaworld/policies/sawyer_peg_unplug_side_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerPegUnplugSideV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"peg_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_peg = o_d["peg_pos"] + np.array([0.005, 0.0, 0.015])
@@ -39,7 +44,7 @@ def _desired_pos(o_d):
return pos_peg + np.array([0.1, 0.0, 0.0])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_peg = o_d["peg_pos"]
diff --git a/metaworld/policies/sawyer_peg_unplug_side_v2_policy.py b/metaworld/policies/sawyer_peg_unplug_side_v2_policy.py
index 72aff1401..f05f76cfa 100644
--- a/metaworld/policies/sawyer_peg_unplug_side_v2_policy.py
+++ b/metaworld/policies/sawyer_peg_unplug_side_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPegUnplugSideV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_gripper": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_peg = o_d["peg_pos"] + np.array([-0.02, 0.0, 0.035])
@@ -40,7 +45,7 @@ def _desired_pos(o_d):
return pos_curr + np.array([0.01, 0.0, 0.0])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_peg = o_d["peg_pos"] + np.array([-0.02, 0.0, 0.035])
diff --git a/metaworld/policies/sawyer_pick_out_of_hole_v1_policy.py b/metaworld/policies/sawyer_pick_out_of_hole_v1_policy.py
index 6bd53ca14..497dea8dd 100644
--- a/metaworld/policies/sawyer_pick_out_of_hole_v1_policy.py
+++ b/metaworld/policies/sawyer_pick_out_of_hole_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPickOutOfHoleV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"puck_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[6:9],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.0, 0.0, -0.02])
pos_goal = o_d["goal_pos"]
@@ -47,7 +52,7 @@ def _desired_pos(o_d):
return pos_goal
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.0, 0.0, -0.02])
diff --git a/metaworld/policies/sawyer_pick_out_of_hole_v2_policy.py b/metaworld/policies/sawyer_pick_out_of_hole_v2_policy.py
index 25a856168..5182168f8 100644
--- a/metaworld/policies/sawyer_pick_out_of_hole_v2_policy.py
+++ b/metaworld/policies/sawyer_pick_out_of_hole_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPickOutOfHoleV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"unused_info": obs[7:-3],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.0, 0.0, 0.02])
pos_goal = o_d["goal_pos"]
@@ -48,7 +53,7 @@ def _desired_pos(o_d):
return pos_goal
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.0, 0.0, 0.02])
diff --git a/metaworld/policies/sawyer_pick_place_v2_policy.py b/metaworld/policies/sawyer_pick_place_v2_policy.py
index 0fc7920e3..bef796190 100644
--- a/metaworld/policies/sawyer_pick_place_v2_policy.py
+++ b/metaworld/policies/sawyer_pick_place_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPickPlaceV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper_distance_apart": obs[3],
@@ -18,7 +23,7 @@ def _parse_obs(obs):
"_prev_obs": obs[18:36],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -31,7 +36,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([-0.005, 0, 0])
pos_goal = o_d["goal_pos"]
@@ -50,7 +55,7 @@ def _desired_pos(o_d):
return pos_goal
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"]
if np.linalg.norm(pos_curr - pos_puck) < 0.07:
diff --git a/metaworld/policies/sawyer_pick_place_wall_v2_policy.py b/metaworld/policies/sawyer_pick_place_wall_v2_policy.py
index 0d5f74e41..3b6ba3915 100644
--- a/metaworld/policies/sawyer_pick_place_wall_v2_policy.py
+++ b/metaworld/policies/sawyer_pick_place_wall_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPickPlaceWallV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -16,20 +21,20 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
action["delta_pos"] = move(
- o_d["hand_pos"], to_xyz=self.desired_pos(o_d), p=10.0
+ o_d["hand_pos"], to_xyz=self._desired_pos(o_d), p=10.0
)
- action["grab_effort"] = self.grab_effort(o_d)
+ action["grab_effort"] = self._grab_effort(o_d)
return action.array
@staticmethod
- def desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([-0.005, 0, 0])
pos_goal = o_d["goal_pos"]
@@ -62,7 +67,7 @@ def desired_pos(o_d):
return pos_goal
@staticmethod
- def grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"]
if (
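
Beyond annotations, the pick-place-wall hunk also renames the public `desired_pos`/`grab_effort` to the private `_desired_pos`/`_grab_effort` used by every other policy and fixes both call sites, so all scripted policies now share one interface. Roughly, that common shape looks like the following (an assumption; the actual `metaworld.policies.policy.Policy` base class is not shown in this diff):

```python
from __future__ import annotations

import abc

import numpy as np
import numpy.typing as npt


class ScriptedPolicy(abc.ABC):
    """Assumed common shape of the policies in this PR; details may differ."""

    @staticmethod
    @abc.abstractmethod
    def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
        """Split the flat observation into named slices."""

    @abc.abstractmethod
    def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
        """Map an observation to a 4-dof action (xyz delta + grab effort)."""
```
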
diff --git a/metaworld/policies/sawyer_plate_slide_back_side_v2_policy.py b/metaworld/policies/sawyer_plate_slide_back_side_v2_policy.py
index 9cd6c634a..437424f43 100644
--- a/metaworld/policies/sawyer_plate_slide_back_side_v2_policy.py
+++ b/metaworld/policies/sawyer_plate_slide_back_side_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPlateSlideBackSideV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -15,20 +20,20 @@ def _parse_obs(obs):
"unused_2": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
action["delta_pos"] = move(
- o_d["hand_pos"], to_xyz=self._desired_xyz(o_d), p=10.0
+ o_d["hand_pos"], to_xyz=self._desired_pos(o_d), p=10.0
)
action["grab_effort"] = 1.0
return action.array
@staticmethod
- def _desired_xyz(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.023, 0.0, 0.025])
diff --git a/metaworld/policies/sawyer_plate_slide_back_v1_policy.py b/metaworld/policies/sawyer_plate_slide_back_v1_policy.py
index d82930be4..3ed020218 100644
--- a/metaworld/policies/sawyer_plate_slide_back_v1_policy.py
+++ b/metaworld/policies/sawyer_plate_slide_back_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerPlateSlideBackV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"puck_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.0, -0.065, 0.025])
diff --git a/metaworld/policies/sawyer_plate_slide_back_v2_policy.py b/metaworld/policies/sawyer_plate_slide_back_v2_policy.py
index 802e72315..7b17e0d62 100644
--- a/metaworld/policies/sawyer_plate_slide_back_v2_policy.py
+++ b/metaworld/policies/sawyer_plate_slide_back_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPlateSlideBackV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_2": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.0, -0.065, 0.025])
diff --git a/metaworld/policies/sawyer_plate_slide_side_v1_policy.py b/metaworld/policies/sawyer_plate_slide_side_v1_policy.py
index 9afa0bfc0..c4e1b5dcb 100644
--- a/metaworld/policies/sawyer_plate_slide_side_v1_policy.py
+++ b/metaworld/policies/sawyer_plate_slide_side_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerPlateSlideSideV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"puck_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.07, 0.0, -0.005])
diff --git a/metaworld/policies/sawyer_plate_slide_side_v2_policy.py b/metaworld/policies/sawyer_plate_slide_side_v2_policy.py
index e650babd9..fe23906fa 100644
--- a/metaworld/policies/sawyer_plate_slide_side_v2_policy.py
+++ b/metaworld/policies/sawyer_plate_slide_side_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPlateSlideSideV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
# return {
# 'hand_pos': obs[:3],
# 'puck_pos': obs[3:6],
@@ -20,7 +25,7 @@ def _parse_obs(obs):
"unused_2": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -33,7 +38,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.07, 0.0, -0.005])
diff --git a/metaworld/policies/sawyer_plate_slide_v1_policy.py b/metaworld/policies/sawyer_plate_slide_v1_policy.py
index 2b159120d..dfbc0abc4 100644
--- a/metaworld/policies/sawyer_plate_slide_v1_policy.py
+++ b/metaworld/policies/sawyer_plate_slide_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPlateSlideV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"puck_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[[6, 7, 8, 10, 11]],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.0, -0.055, 0.03])
diff --git a/metaworld/policies/sawyer_plate_slide_v2_policy.py b/metaworld/policies/sawyer_plate_slide_v2_policy.py
index 043a40629..0690f86d5 100644
--- a/metaworld/policies/sawyer_plate_slide_v2_policy.py
+++ b/metaworld/policies/sawyer_plate_slide_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPlateSlideV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -17,7 +22,7 @@ def _parse_obs(obs):
"unused_3": obs[-2:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -30,7 +35,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([0.0, -0.055, 0.03])
diff --git a/metaworld/policies/sawyer_push_back_v1_policy.py b/metaworld/policies/sawyer_push_back_v1_policy.py
index a1bed3083..5fa6a6175 100644
--- a/metaworld/policies/sawyer_push_back_v1_policy.py
+++ b/metaworld/policies/sawyer_push_back_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPushBackV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"puck_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[6:9],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"]
@@ -43,7 +48,7 @@ def _desired_pos(o_d):
return o_d["goal_pos"] + np.array([0.0, 0.0, 0.05])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"]
diff --git a/metaworld/policies/sawyer_push_back_v2_policy.py b/metaworld/policies/sawyer_push_back_v2_policy.py
index db080be9b..d3721c147 100644
--- a/metaworld/policies/sawyer_push_back_v2_policy.py
+++ b/metaworld/policies/sawyer_push_back_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPushBackV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"]
@@ -44,7 +49,7 @@ def _desired_pos(o_d):
return o_d["goal_pos"] + np.array([0.0, 0.0, pos_curr[2]])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"]
diff --git a/metaworld/policies/sawyer_push_v2_policy.py b/metaworld/policies/sawyer_push_v2_policy.py
index 47a6c0e14..1ddfaac18 100644
--- a/metaworld/policies/sawyer_push_v2_policy.py
+++ b/metaworld/policies/sawyer_push_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPushV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"] + np.array([-0.005, 0, 0])
pos_goal = o_d["goal_pos"]
@@ -45,7 +50,7 @@ def _desired_pos(o_d):
return pos_goal
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_puck = o_d["puck_pos"]
diff --git a/metaworld/policies/sawyer_push_wall_v2_policy.py b/metaworld/policies/sawyer_push_wall_v2_policy.py
index 0b237246d..018496547 100644
--- a/metaworld/policies/sawyer_push_wall_v2_policy.py
+++ b/metaworld/policies/sawyer_push_wall_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerPushWallV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -16,20 +21,20 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
action["delta_pos"] = move(
- o_d["hand_pos"], to_xyz=self.desired_pos(o_d), p=10.0
+ o_d["hand_pos"], to_xyz=self._desired_pos(o_d), p=10.0
)
- action["grab_effort"] = self.grab_effort(o_d)
+ action["grab_effort"] = self._grab_effort(o_d)
return action.array
@staticmethod
- def desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_obj = o_d["obj_pos"] + np.array([-0.005, 0, 0])
@@ -51,7 +56,7 @@ def desired_pos(o_d):
return o_d["goal_pos"]
@staticmethod
- def grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_obj = o_d["obj_pos"]
if (
diff --git a/metaworld/policies/sawyer_reach_v2_policy.py b/metaworld/policies/sawyer_reach_v2_policy.py
index 5841b2036..f37c3747c 100644
--- a/metaworld/policies/sawyer_reach_v2_policy.py
+++ b/metaworld/policies/sawyer_reach_v2_policy.py
@@ -1,4 +1,7 @@
+from __future__ import annotations
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +10,7 @@
class SawyerReachV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -16,7 +19,7 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
diff --git a/metaworld/policies/sawyer_reach_wall_v2_policy.py b/metaworld/policies/sawyer_reach_wall_v2_policy.py
index f5c36196c..f4042608b 100644
--- a/metaworld/policies/sawyer_reach_wall_v2_policy.py
+++ b/metaworld/policies/sawyer_reach_wall_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, move
@@ -6,7 +11,7 @@
class SawyerReachWallV2Policy(Policy):
@staticmethod
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_hand = o_d["hand_pos"]
pos_goal = o_d["goal_pos"]
# if the hand is going to run into the wall, go up while still moving
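
The comment above ("go up while still moving") summarizes the reach-wall branching: when the wall lies between hand and goal, the policy targets an elevated waypoint instead of the goal itself. An illustrative version with invented wall coordinates (the policy's own constants are not in this hunk):

```python
from __future__ import annotations

import numpy as np
import numpy.typing as npt


def desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[np.float64]:
    pos_hand, pos_goal = o_d["hand_pos"], o_d["goal_pos"]
    wall_y, clearance = 0.75, 0.25          # illustrative wall position and height
    # If hand and goal sit on opposite sides of the wall, route over it:
    # keep moving in xy but hold an elevated z until the wall is cleared.
    if (pos_hand[1] < wall_y) != (pos_goal[1] < wall_y):
        return np.array([pos_goal[0], pos_goal[1], clearance])
    return pos_goal
```
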
diff --git a/metaworld/policies/sawyer_shelf_place_v1_policy.py b/metaworld/policies/sawyer_shelf_place_v1_policy.py
index 9e45a6be1..f5d1ef962 100644
--- a/metaworld/policies/sawyer_shelf_place_v1_policy.py
+++ b/metaworld/policies/sawyer_shelf_place_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerShelfPlaceV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"block_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[[6, 7, 8, 10, 11]],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_block = o_d["block_pos"] + np.array([0.005, 0.0, 0.015])
pos_shelf_x = o_d["shelf_x"]
@@ -51,7 +56,7 @@ def _desired_pos(o_d):
return pos_new
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_block = o_d["block_pos"]
diff --git a/metaworld/policies/sawyer_shelf_place_v2_policy.py b/metaworld/policies/sawyer_shelf_place_v2_policy.py
index 493791bb0..1ef085776 100644
--- a/metaworld/policies/sawyer_shelf_place_v2_policy.py
+++ b/metaworld/policies/sawyer_shelf_place_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerShelfPlaceV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -17,7 +22,7 @@ def _parse_obs(obs):
"unused_3": obs[-2:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -30,7 +35,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_block = o_d["block_pos"] + np.array([-0.005, 0.0, 0.015])
pos_shelf_x = o_d["shelf_x"]
@@ -53,7 +58,7 @@ def _desired_pos(o_d):
return pos_new
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_block = o_d["block_pos"]
diff --git a/metaworld/policies/sawyer_soccer_v1_policy.py b/metaworld/policies/sawyer_soccer_v1_policy.py
index 7b8b34edb..61560f828 100644
--- a/metaworld/policies/sawyer_soccer_v1_policy.py
+++ b/metaworld/policies/sawyer_soccer_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerSoccerV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"ball_pos": obs[3:6],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[6:9],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_ball = o_d["ball_pos"] + np.array([0.0, 0.0, 0.03])
pos_goal = o_d["goal_pos"]
diff --git a/metaworld/policies/sawyer_soccer_v2_policy.py b/metaworld/policies/sawyer_soccer_v2_policy.py
index bf961dc0a..33182bb2b 100644
--- a/metaworld/policies/sawyer_soccer_v2_policy.py
+++ b/metaworld/policies/sawyer_soccer_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerSoccerV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_ball = o_d["ball_pos"] + np.array([0.0, 0.0, 0.03])
pos_goal = o_d["goal_pos"]
diff --git a/metaworld/policies/sawyer_stick_pull_v1_policy.py b/metaworld/policies/sawyer_stick_pull_v1_policy.py
index 9cc2121a6..6b048850f 100644
--- a/metaworld/policies/sawyer_stick_pull_v1_policy.py
+++ b/metaworld/policies/sawyer_stick_pull_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerStickPullV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"stick_pos": obs[3:6],
@@ -15,20 +20,20 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_pow": 3})
action["delta_pos"] = move(
- o_d["hand_pos"], to_xyz=self._desired_xyz(o_d), p=10.0
+ o_d["hand_pos"], to_xyz=self._desired_pos(o_d), p=10.0
)
- action["grab_pow"] = self._grab_pow(o_d)
+ action["grab_pow"] = self._grab_effort(o_d)
return action.array
@staticmethod
- def _desired_xyz(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
hand_pos = o_d["hand_pos"]
stick_pos = o_d["stick_pos"] + np.array([-0.02, 0.0, 0.0])
obj_pos = o_d["obj_pos"]
@@ -49,7 +54,7 @@ def _desired_xyz(o_d):
return
@staticmethod
- def _grab_pow(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
hand_pos = o_d["hand_pos"]
stick_pos = o_d["stick_pos"] + np.array([-0.02, 0.0, 0.0])
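Note that this file (and the other stick policies below) also renames `_desired_xyz` to `_desired_pos` and `_grab_pow` to `_grab_effort`, matching the helper names every other scripted policy already uses. One payoff is that generic tooling can treat the suite uniformly; a hypothetical check, using imports that exist in this diff:

```python
from metaworld.policies.sawyer_stick_pull_v1_policy import SawyerStickPullV1Policy
from metaworld.policies.sawyer_stick_push_v1_policy import SawyerStickPushV1Policy

# Every scripted policy now exposes the same private helpers, so a sweep like
# this no longer needs per-policy special cases.
for cls in (SawyerStickPullV1Policy, SawyerStickPushV1Policy):
    assert hasattr(cls, "_desired_pos") and hasattr(cls, "_grab_effort")
```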
diff --git a/metaworld/policies/sawyer_stick_pull_v2_policy.py b/metaworld/policies/sawyer_stick_pull_v2_policy.py
index 710411884..99dd943b1 100644
--- a/metaworld/policies/sawyer_stick_pull_v2_policy.py
+++ b/metaworld/policies/sawyer_stick_pull_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerStickPullV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -18,20 +23,20 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_pow": 3})
action["delta_pos"] = move(
- o_d["hand_pos"], to_xyz=self._desired_xyz(o_d), p=25.0
+ o_d["hand_pos"], to_xyz=self._desired_pos(o_d), p=25.0
)
- action["grab_pow"] = self._grab_pow(o_d)
+ action["grab_pow"] = self._grab_effort(o_d)
return action.array
@staticmethod
- def _desired_xyz(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
hand_pos = o_d["hand_pos"]
stick_pos = o_d["stick_pos"] + np.array([-0.015, 0.0, 0.03])
thermos_pos = o_d["obj_pos"] + np.array([-0.015, 0.0, 0.03])
@@ -52,7 +57,7 @@ def _desired_xyz(o_d):
return goal_pos
@staticmethod
- def _grab_pow(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
hand_pos = o_d["hand_pos"]
stick_pos = o_d["stick_pos"] + np.array([-0.015, 0.0, 0.03])
diff --git a/metaworld/policies/sawyer_stick_push_v1_policy.py b/metaworld/policies/sawyer_stick_push_v1_policy.py
index f627236ab..5bd9db8e1 100644
--- a/metaworld/policies/sawyer_stick_push_v1_policy.py
+++ b/metaworld/policies/sawyer_stick_push_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerStickPushV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"stick_pos": obs[3:6],
@@ -15,20 +20,20 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_pow": 3})
action["delta_pos"] = move(
- o_d["hand_pos"], to_xyz=self._desired_xyz(o_d), p=10.0
+ o_d["hand_pos"], to_xyz=self._desired_pos(o_d), p=10.0
)
- action["grab_pow"] = self._grab_pow(o_d)
+ action["grab_pow"] = self._grab_effort(o_d)
return action.array
@staticmethod
- def _desired_xyz(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
hand_pos = o_d["hand_pos"]
stick_pos = o_d["stick_pos"] + np.array([-0.02, 0.0, 0.0])
obj_pos = o_d["obj_pos"]
@@ -47,7 +52,7 @@ def _desired_xyz(o_d):
return np.array([goal_pos[0], goal_pos[1], hand_pos[2]])
@staticmethod
- def _grab_pow(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
hand_pos = o_d["hand_pos"]
stick_pos = o_d["stick_pos"] + np.array([-0.02, 0.0, 0.0])
diff --git a/metaworld/policies/sawyer_stick_push_v2_policy.py b/metaworld/policies/sawyer_stick_push_v2_policy.py
index 4afea7c42..7cdcc790b 100644
--- a/metaworld/policies/sawyer_stick_push_v2_policy.py
+++ b/metaworld/policies/sawyer_stick_push_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerStickPushV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -18,20 +23,20 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_pow": 3})
action["delta_pos"] = move(
- o_d["hand_pos"], to_xyz=self._desired_xyz(o_d), p=10.0
+ o_d["hand_pos"], to_xyz=self._desired_pos(o_d), p=10.0
)
- action["grab_pow"] = self._grab_pow(o_d)
+ action["grab_pow"] = self._grab_effort(o_d)
return action.array
@staticmethod
- def _desired_xyz(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
hand_pos = o_d["hand_pos"]
stick_pos = o_d["stick_pos"] + np.array([0.015, 0.0, 0.03])
thermos_pos = o_d["obj_pos"]
@@ -52,7 +57,7 @@ def _desired_xyz(o_d):
return goal_pos
@staticmethod
- def _grab_pow(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
hand_pos = o_d["hand_pos"]
stick_pos = o_d["stick_pos"] + np.array([0.015, 0.0, 0.03])
diff --git a/metaworld/policies/sawyer_sweep_into_v1_policy.py b/metaworld/policies/sawyer_sweep_into_v1_policy.py
index 5f0de3bdb..8e0c57b3e 100644
--- a/metaworld/policies/sawyer_sweep_into_v1_policy.py
+++ b/metaworld/policies/sawyer_sweep_into_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerSweepIntoV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"cube_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_cube = o_d["cube_pos"] + np.array([0.0, 0.0, 0.015])
@@ -39,7 +44,7 @@ def _desired_pos(o_d):
return np.array([0.0, 0.8, 0.015])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_cube = o_d["cube_pos"]
diff --git a/metaworld/policies/sawyer_sweep_into_v2_policy.py b/metaworld/policies/sawyer_sweep_into_v2_policy.py
index 9193d298c..da6b6572a 100644
--- a/metaworld/policies/sawyer_sweep_into_v2_policy.py
+++ b/metaworld/policies/sawyer_sweep_into_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerSweepIntoV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_cube = o_d["cube_pos"] + np.array([-0.005, 0.0, 0.01])
pos_goal = o_d["goal_pos"]
@@ -42,7 +47,7 @@ def _desired_pos(o_d):
return pos_goal
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_cube = o_d["cube_pos"]
diff --git a/metaworld/policies/sawyer_sweep_v1_policy.py b/metaworld/policies/sawyer_sweep_v1_policy.py
index 21d08f042..ea9f23267 100644
--- a/metaworld/policies/sawyer_sweep_v1_policy.py
+++ b/metaworld/policies/sawyer_sweep_v1_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,14 +12,14 @@
class SawyerSweepV1Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"cube_pos": obs[3:6],
"unused_info": obs[6:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -27,7 +32,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_cube = o_d["cube_pos"] + np.array([0.0, 0.0, 0.015])
@@ -40,7 +45,7 @@ def _desired_pos(o_d):
return np.array([0.5, pos_cube[1], 0.1])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_cube = o_d["cube_pos"]
diff --git a/metaworld/policies/sawyer_sweep_v2_policy.py b/metaworld/policies/sawyer_sweep_v2_policy.py
index 8dfebc59b..d319fa69c 100644
--- a/metaworld/policies/sawyer_sweep_v2_policy.py
+++ b/metaworld/policies/sawyer_sweep_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerSweepV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_1": obs[3],
@@ -16,7 +21,7 @@ def _parse_obs(obs):
"goal_pos": obs[-3:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -29,7 +34,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_cube = o_d["cube_pos"] + np.array([0.0, 0.0, 0.015])
pos_goal = o_d["goal_pos"]
@@ -43,7 +48,7 @@ def _desired_pos(o_d):
return pos_goal + np.array([0, 0, 0.1])
@staticmethod
- def _grab_effort(o_d):
+ def _grab_effort(o_d: dict[str, npt.NDArray[np.float64]]) -> float:
pos_curr = o_d["hand_pos"]
pos_cube = o_d["cube_pos"]
diff --git a/metaworld/policies/sawyer_window_close_v2_policy.py b/metaworld/policies/sawyer_window_close_v2_policy.py
index 66ae1fde5..3f4e0c747 100644
--- a/metaworld/policies/sawyer_window_close_v2_policy.py
+++ b/metaworld/policies/sawyer_window_close_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerWindowCloseV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"gripper_unused": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_wndw = o_d["wndw_pos"] + np.array([+0.03, -0.03, -0.08])
diff --git a/metaworld/policies/sawyer_window_open_v2_policy.py b/metaworld/policies/sawyer_window_open_v2_policy.py
index c5bbad3a5..03271a7c7 100644
--- a/metaworld/policies/sawyer_window_open_v2_policy.py
+++ b/metaworld/policies/sawyer_window_open_v2_policy.py
@@ -1,4 +1,9 @@
+from __future__ import annotations
+
+from typing import Any
+
import numpy as np
+import numpy.typing as npt
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, assert_fully_parsed, move
@@ -7,7 +12,7 @@
class SawyerWindowOpenV2Policy(Policy):
@staticmethod
@assert_fully_parsed
- def _parse_obs(obs):
+ def _parse_obs(obs: npt.NDArray[np.float64]) -> dict[str, npt.NDArray[np.float64]]:
return {
"hand_pos": obs[:3],
"unused_gripper_open": obs[3],
@@ -15,7 +20,7 @@ def _parse_obs(obs):
"unused_info": obs[7:],
}
- def get_action(self, obs):
+ def get_action(self, obs: npt.NDArray[np.float64]) -> npt.NDArray[np.float32]:
o_d = self._parse_obs(obs)
action = Action({"delta_pos": np.arange(3), "grab_effort": 3})
@@ -28,7 +33,7 @@ def get_action(self, obs):
return action.array
@staticmethod
- def _desired_pos(o_d):
+ def _desired_pos(o_d: dict[str, npt.NDArray[np.float64]]) -> npt.NDArray[Any]:
pos_curr = o_d["hand_pos"]
pos_wndw = o_d["wndw_pos"] + np.array([-0.03, -0.03, -0.08])
diff --git a/metaworld/py.typed b/metaworld/py.typed
new file mode 100644
index 000000000..e69de29bb
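The empty `metaworld/py.typed` file is the PEP 561 marker telling type checkers that the package ships inline annotations; it is installed via the `[tool.setuptools.package-data]` entry added to `pyproject.toml` below. A sketch of the downstream effect, assuming the standard 39-dimensional V2 observation:

```python
import numpy as np

from metaworld.policies.sawyer_soccer_v2_policy import SawyerSoccerV2Policy

policy = SawyerSoccerV2Policy()
obs = np.zeros(39, dtype=np.float64)  # 39 is the assumed V2 observation size
act = policy.get_action(obs)
# With py.typed installed, a downstream mypy run sees the real signature above
# and infers act as npt.NDArray[np.float32] instead of Any.
```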
diff --git a/metaworld/types.py b/metaworld/types.py
new file mode 100644
index 000000000..638d36690
--- /dev/null
+++ b/metaworld/types.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from typing import Any, NamedTuple, Tuple
+
+import numpy as np
+import numpy.typing as npt
+from typing_extensions import NotRequired, TypeAlias, TypedDict
+
+
+class Task(NamedTuple):
+ """All data necessary to describe a single MDP.
+
+ Should be passed into a `MetaWorldEnv`'s `set_task` method.
+ """
+
+ env_name: str
+ data: bytes # Contains env parameters like random_init and *a* goal
+
+
+XYZ: TypeAlias = "Tuple[float, float, float]"
+"""A 3D coordinate."""
+
+
+class EnvironmentStateDict(TypedDict):
+ state: dict[str, Any]
+ mjb: str
+ mocap: tuple[npt.NDArray[np.float64], npt.NDArray[np.float64]]
+
+
+class ObservationDict(TypedDict):
+ state_observation: npt.NDArray[np.float64]
+ state_desired_goal: npt.NDArray[np.float64]
+ state_achieved_goal: npt.NDArray[np.float64]
+
+
+class InitConfigDict(TypedDict):
+ obj_init_angle: NotRequired[float]
+ obj_init_pos: npt.NDArray[Any]
+ hand_init_pos: npt.NDArray[Any]
+
+
+class HammerInitConfigDict(TypedDict):
+ hammer_init_pos: npt.NDArray[Any]
+ hand_init_pos: npt.NDArray[Any]
+
+
+class StickInitConfigDict(TypedDict):
+ stick_init_pos: npt.NDArray[Any]
+ hand_init_pos: npt.NDArray[Any]
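A sketch of how these shared types are meant to be consumed; `Task` and `InitConfigDict` are the real definitions above, while the concrete values are illustrative:

```python
import numpy as np

from metaworld.types import InitConfigDict, Task

# `data` is opaque bytes describing env parameters and a single goal.
task = Task(env_name="pick-place-v2", data=b"...")

config: InitConfigDict = {
    # obj_init_angle is NotRequired, so omitting it still type-checks
    "obj_init_pos": np.array([0.0, 0.6, 0.02]),
    "hand_init_pos": np.array([0.0, 0.6, 0.2]),
}
```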
diff --git a/pyproject.toml b/pyproject.toml
index e8e79653e..64fdb2c69 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,5 +1,4 @@
# Package ######################################################################
-
[build-system]
requires = ["setuptools >= 61.0.0"]
build-backend = "setuptools.build_meta"
@@ -14,7 +13,7 @@ authors = [{ name = "Farama Foundation", email = "contact@farama.org" }]
license = { text = "MIT License" }
keywords = ["Reinforcement Learning", "game", "RL", "AI", "gymnasium"]
classifiers = [
- "Development Status :: 4 - Beta", # change to `5 - Production/Stable` when ready
+ "Development Status :: 4 - Beta", # change to `5 - Production/Stable` when ready
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
@@ -25,8 +24,8 @@ classifiers = [
'Topic :: Scientific/Engineering :: Artificial Intelligence',
]
dependencies = [
- "gymnasium@git+https://github.com/Farama-Foundation/Gymnasium.git",
- "mujoco<3.0.0",
+ "gymnasium>=1.0.0a1",
+ "mujoco>=3.0.0",
"numpy>=1.18",
"scipy>=1.4.1",
"imageio"
@@ -34,12 +33,8 @@ dependencies = [
[project.optional-dependencies]
# Update dependencies in `all` if any are added or removed
-testing = [
- "ipdb",
- "memory_profiler",
- "pyquaternion==0.9.5",
- "pytest>=4.4.0",
-]
+testing = ["ipdb", "memory_profiler", "pyquaternion==0.9.5", "pytest>=4.4.0"]
+dev = ["black", "isort", "mypy"]
[project.urls]
Homepage = "https://farama.org"
@@ -50,11 +45,13 @@ Documentation = "https://metaworld.github.io/"
[tool.setuptools]
include-package-data = true
+[tool.setuptools.package-data]
+metaworld = ["py.typed"]
+
[tool.setuptools.packages.find]
include = ["metaworld", "metaworld.*"]
# Linters and Test tools #######################################################
-
[tool.black]
safe = true
@@ -62,3 +59,11 @@ safe = true
atomic = true
profile = "black"
src_paths = ["metaworld", "tests"]
+
+[tool.mypy]
+plugins = ["numpy.typing.mypy_plugin"]
+exclude = ["docs"]
+
+[[tool.mypy.overrides]]
+module = ["setuptools", "glfw", "mujoco", "memory_profiler", "scipy.*"]
+ignore_missing_imports = true
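The `[[tool.mypy.overrides]]` block exists because those modules ship no type stubs; without `ignore_missing_imports`, mypy errors out at the import line before checking anything else. A minimal module that only passes under the new config because of that override:

```python
import mujoco  # untyped upstream; the override makes mypy treat it as Any
import numpy as np
import numpy.typing as npt


def zeros3() -> npt.NDArray[np.float64]:
    # numpy's bundled stubs type np.zeros() as float64 by default, so this
    # return annotation checks without a cast.
    return np.zeros(3)
```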
diff --git a/scripts/demo_sawyer.py b/scripts/demo_sawyer.py
deleted file mode 100755
index e83788a80..000000000
--- a/scripts/demo_sawyer.py
+++ /dev/null
@@ -1,815 +0,0 @@
-#!/usr/bin/env python3
-
-import argparse
-import time
-
-import glfw
-import numpy as np
-
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_box_open import SawyerBoxOpenEnv
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_door_hook import SawyerDoorHookEnv
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_laptop_close import SawyerLaptopCloseEnv
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_multiple_objects import MultiSawyerEnv
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_pick_and_place import SawyerPickAndPlaceEnv
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_pick_and_place_wsg import (
- SawyerPickAndPlaceWsgEnv,
-)
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env import (
- SawyerPushAndReachXYEnv,
-)
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_push_and_reach_env_two_pucks import (
- SawyerPushAndReachXYZDoublePuckEnv,
-)
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj import SawyerTwoObjectEnv
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_push_nips import (
- SawyerPushAndReachXYEasyEnv,
-)
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_reach import (
- SawyerReachEnv,
- SawyerReachXYZEnv,
-)
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_rope import SawyerRopeEnv
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_stack import SawyerStackEnv
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_throw import SawyerThrowEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_assembly_peg import SawyerNutAssemblyEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_bin_picking import SawyerBinPickingEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_box_close import SawyerBoxCloseEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_button_press import SawyerButtonPressEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_button_press_topdown import (
- SawyerButtonPressTopdownEnv,
-)
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_dial_turn import SawyerDialTurnEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_door import SawyerDoorEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_door_close import SawyerDoorCloseEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_drawer_close import SawyerDrawerCloseEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_drawer_open import SawyerDrawerOpenEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_hammer import SawyerHammerEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_hand_insert import SawyerHandInsertEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_lever_pull import SawyerLeverPullEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_peg_insertion_side import (
- SawyerPegInsertionSideEnv,
-)
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_reach_push_pick_place import (
- SawyerReachPushPickPlaceEnv,
-)
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_shelf_place import SawyerShelfPlaceEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_stick_pull import SawyerStickPullEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_stick_push import SawyerStickPushEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_sweep import SawyerSweepEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_sweep_into_goal import (
- SawyerSweepIntoGoalEnv,
-)
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_window_close import SawyerWindowCloseEnv
-from metaworld.envs.mujoco.sawyer_xyz.v1.sawyer_window_open import SawyerWindowOpenEnv
-
-
-# function that closes the render window
-def close(env):
- if env.viewer is not None:
- # self.viewer.finish()
- glfw.destroy_window(env.viewer.window)
- env.viewer = None
-
-
-def sample_sawyer_assembly_peg():
- env = SawyerNutAssemblyEnv()
- for _ in range(1):
- env.reset()
- for _ in range(50):
- env.render()
- env.step(env.action_space.sample())
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_bin_picking():
- env = SawyerBinPickingEnv()
- for _ in range(1):
- env.reset()
- for _ in range(50):
- env.render()
- env.step(env.action_space.sample())
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_box_close():
- env = SawyerBoxCloseEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- for _ in range(10):
- env.data.set_mocap_pos("mocap", np.array([0, 0.8, 0.25]))
- env.data.set_mocap_quat("mocap", np.array([1, 0, 1, 0]))
- env.do_simulation([-1, 1], env.frame_skip)
- # self.do_simulation(None, self.frame_skip)
- for _ in range(100):
- env.render()
- # env.step(env.action_space.sample())
- # env.step(np.array([0, -1, 0, 0, 0]))
- if _ < 10:
- env.step(np.array([0, 0, -1, 0, 0]))
- elif _ < 50:
- env.step(np.array([0, 0, 0, 0, 1]))
- else:
- env.step(np.array([0, 0, 1, 0, 1]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_box_open():
- env = SawyerBoxOpenEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- for _ in range(10):
- env.data.set_mocap_pos("mocap", np.array([0, 0.8, 0.25]))
- # env.data.set_mocap_pos('mocap', np.array([0, 0.6, 0.25]))
- env.data.set_mocap_quat("mocap", np.array([1, 0, 1, 0]))
- env.do_simulation([-1, 1], env.frame_skip)
- # self.do_simulation(None, self.frame_skip)
- for _ in range(100):
- env.render()
- if _ < 10:
- env.step(np.array([0, 0, -1, 0, 0]))
- elif _ < 50:
- env.step(np.array([0, 0, 0, 0, 1]))
- else:
- env.step(np.array([0, 0, 1, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 0]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_button_press_6d0f():
- env = SawyerButtonPressEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.25]))
- # # env.data.set_mocap_pos('mocap', np.array([0, 0.6, 0.25]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- for _ in range(100):
- print(env.data.site_xpos[env.model.site_name2id("buttonStart")])
- env.render()
- # env.step(env.action_space.sample())
- # if _ < 10:
- # env.step(np.array([0, 0, -1, 0, 0]))
- # elif _ < 50:
- # env.step(np.array([0, 0, 0, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 0]))
- env.step(np.array([0, 1, 0, 0, 1]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_button_press_topdown_6d0f():
- env = SawyerButtonPressTopdownEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.25]))
- # # env.data.set_mocap_pos('mocap', np.array([0, 0.6, 0.25]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- for _ in range(100):
- print(env.data.site_xpos[env.model.site_name2id("buttonStart")])
- env.render()
- # env.step(env.action_space.sample())
- # if _ < 10:
- # env.step(np.array([0, 0, -1, 0, 0]))
- # elif _ < 50:
- # env.step(np.array([0, 0, 0, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 0]))
- env.step(np.array([0, 0, -1, 0, 1]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_dial_turn():
- env = SawyerDialTurnEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.25]))
- # # env.data.set_mocap_pos('mocap', np.array([0, 0.6, 0.25]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- for _ in range(100):
- print(env.data.site_xpos[env.model.site_name2id("dialStart")])
- env.render()
- # env.step(env.action_space.sample())
- # if _ < 10:
- # env.step(np.array([0, 0, -1, 0, 0]))
- # elif _ < 50:
- # env.step(np.array([0, 0, 0, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 0]))
- env.step(np.array([0, 0, -1, 0, 1]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_door():
- env = SawyerDoorEnv()
- for _ in range(100):
- env.render()
- action = env.action_space.sample()
- env.step(action)
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_door_close():
- env = SawyerDoorCloseEnv()
- for _ in range(100):
- env.render()
- action = env.action_space.sample()
- env.step(action)
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_door_hook():
- env = SawyerDoorHookEnv()
- for _ in range(100):
- env.render()
- action = env.action_space.sample()
- env.step(action)
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_drawer_close():
- env = SawyerDrawerCloseEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- env._set_obj_xyz(np.array([-0.2, 0.8, 0.05]))
- for _ in range(10):
- env.data.set_mocap_pos("mocap", np.array([0, 0.5, 0.05]))
- env.data.set_mocap_quat("mocap", np.array([1, 0, 1, 0]))
- env.do_simulation([-1, 1], env.frame_skip)
- # self.do_simulation(None, self.frame_skip)
- for _ in range(50):
- env.render()
- # env.step(env.action_space.sample())
- # env.step(np.array([0, -1, 0, 0, 0]))
- env.step(np.array([0, 1, 0, 0, 0]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_drawer_open():
- env = SawyerDrawerOpenEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- env._set_obj_xyz(np.array([-0.2, 0.8, 0.05]))
- for _ in range(10):
- env.data.set_mocap_pos("mocap", np.array([0, 0.5, 0.05]))
- env.data.set_mocap_quat("mocap", np.array([1, 0, 1, 0]))
- env.do_simulation([-1, 1], env.frame_skip)
- # self.do_simulation(None, self.frame_skip)
- for _ in range(50):
- env.render()
- # env.step(env.action_space.sample())
- # env.step(np.array([0, -1, 0, 0, 0]))
- env.step(np.array([0, 1, 0, 0, 0]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_hammer():
- env = SawyerHammerEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.25]))
- # # env.data.set_mocap_pos('mocap', np.array([0, 0.6, 0.25]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- for _ in range(100):
- env.render()
- # env.step(env.action_space.sample())
- # if _ < 10:
- # env.step(np.array([0, 0, -1, 0, 0]))
- # elif _ < 50:
- # env.step(np.array([0, 0, 0, 0, 1]))
- if _ < 10:
- env.step(np.array([0, 0, -1, 0, 0]))
- else:
- env.step(np.array([0, 1, 0, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 0]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_hand_insert():
- env = SawyerHandInsertEnv(fix_goal=True)
- for i in range(100):
- if i % 100 == 0:
- env.reset()
- env.step(np.array([0, 1, 1]))
- env.render()
- close(env)
-
-
-def sample_sawyer_laptop_close():
- env = SawyerLaptopCloseEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.9, 0.22]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # # env.do_simulation([-1,1], env.frame_skip)
- # env.do_simulation([1,-1], env.frame_skip)
- # env._set_obj_xyz(np.array([-0.2, 0.8, 0.05]))
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.5, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- for _ in range(100):
- env.render()
- # env.step(env.action_space.sample())
- # env.step(np.array([0, -1, 0, 0, 1]))
- env.step(np.array([0, 0, 0, 0, 1]))
- print(env.get_laptop_angle())
- # env.step(np.array([0, 1, 0, 0, 0]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_lever_pull():
- env = SawyerLeverPullEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.25]))
- # # env.data.set_mocap_pos('mocap', np.array([0, 0.6, 0.25]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- for _ in range(100):
- print(env.data.site_xpos[env.model.site_name2id("basesite")])
- env.render()
- # env.step(env.action_space.sample())
- # if _ < 10:
- # env.step(np.array([0, 0, -1, 0, 0]))
- # elif _ < 50:
- # env.step(np.array([0, 0, 0, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 0]))
- env.step(np.array([0, 0, -1, 0, 1]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-# sawyer_multiple_objects doesn't work
-def sample_sawyer_multiple_objects():
- # env = MultiSawyerEnv(
- # do_render=False,
- # finger_sensors=False,
- # num_objects=3,
- # object_meshes=None,
- # randomize_initial_pos=False,
- # fix_z=True,
- # fix_gripper=True,
- # fix_rotation=True,
- # )
- # env = ImageEnv(env,
- # non_presampled_goal_img_is_garbage=True,
- # recompute_reward=False,
- # init_camera=sawyer_pusher_camera_upright_v2,
- # )
- # for i in range(10000):
- # a = np.random.uniform(-1, 1, 5)
- # o, _, _, _ = env.step(a)
- # if i % 10 == 0:
- # env.reset()
-
- # img = o["image_observation"].transpose().reshape(84, 84, 3)
- # cv2.imshow('window', img)
- # cv2.waitKey(100)
-
- size = 0.1
- low = np.array([-size, 0.4 - size, 0])
- high = np.array([size, 0.4 + size, 0.1])
- env = MultiSawyerEnv(
- do_render=False,
- finger_sensors=False,
- num_objects=1,
- object_meshes=None,
- # randomize_initial_pos=True,
- fix_z=True,
- fix_gripper=True,
- fix_rotation=True,
- cylinder_radius=0.03,
- maxlen=0.03,
- workspace_low=low,
- workspace_high=high,
- hand_low=low,
- hand_high=high,
- init_hand_xyz=(0, 0.4 - size, 0.089),
- )
- for i in range(100):
- a = np.random.uniform(-1, 1, 5)
- o, r, _, _ = env.step(a)
- if i % 100 == 0:
- env.reset()
- # print(i, r)
- # print(o["state_observation"])
- # print(o["state_desired_goal"])
- env.render()
- close(env)
-
- # from robosuite.devices import SpaceMouse
-
- # device = SpaceMouse()
- # size = 0.1
- # low = np.array([-size, 0.4 - size, 0])
- # high = np.array([size, 0.4 + size, 0.1])
- # env = MultiSawyerEnv(
- # do_render=False,
- # finger_sensors=False,
- # num_objects=1,
- # object_meshes=None,
- # workspace_low = low,
- # workspace_high = high,
- # hand_low = low,
- # hand_high = high,
- # fix_z=True,
- # fix_gripper=True,
- # fix_rotation=True,
- # cylinder_radius=0.03,
- # maxlen=0.03,
- # init_hand_xyz=(0, 0.4-size, 0.089),
- # )
- # for i in range(10000):
- # state = device.get_controller_state()
- # dpos, rotation, grasp, reset = (
- # state["dpos"],
- # state["rotation"],
- # state["grasp"],
- # state["reset"],
- # )
-
- # # convert into a suitable end effector action for the environment
- # # current = env._right_hand_orn
- # # drotation = current.T.dot(rotation) # relative rotation of desired from current
- # # dquat = T.mat2quat(drotation)
- # # grasp = grasp - 1. # map 0 to -1 (open) and 1 to 0 (closed halfway)
- # # action = np.concatenate([dpos, dquat, [grasp]])
-
- # a = dpos * 10 # 200
-
- # # a[:3] = np.array((0, 0.7, 0.1)) - env.get_endeff_pos()
- # # a = np.array([np.random.uniform(-0.05, 0.05), np.random.uniform(-0.05, 0.05), 0.1, 0 , 1])
- # o, _, _, _ = env.step(a)
- # if i % 100 == 0:
- # env.reset()
- # # print(env.sim.data.qpos[:7])
- # env.render()
-
-
-def sample_sawyer_peg_insertion_side():
- env = SawyerPegInsertionSideEnv()
- for _ in range(1):
- env.reset()
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.05]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- # for _ in range(10):
- # env.data.set_mocap_pos('mocap', np.array([0, 0.8, 0.25]))
- # # env.data.set_mocap_pos('mocap', np.array([0, 0.6, 0.25]))
- # env.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
- # env.do_simulation([-1,1], env.frame_skip)
- # #self.do_simulation(None, self.frame_skip)
- for _ in range(100):
- print(
- "Before:",
- env.sim.model.site_pos[env.model.site_name2id("hole")]
- + env.sim.model.body_pos[env.model.body_name2id("box")],
- )
- env.sim.model.body_pos[env.model.body_name2id("box")] = np.array(
- [-0.3, np.random.uniform(0.5, 0.9), 0.05]
- )
- print(
- "After: ",
- env.sim.model.site_pos[env.model.site_name2id("hole")]
- + env.sim.model.body_pos[env.model.body_name2id("box")],
- )
- env.render()
- env.step(env.action_space.sample())
- # if _ < 10:
- # env.step(np.array([0, 0, -1, 0, 0]))
- # elif _ < 50:
- # env.step(np.array([0, 0, 0, 0, 1]))
- # if _ < 10:
- # env.step(np.array([0, 0, -1, 0, 0]))
- # else:
- # env.step(np.array([0, 1, 0, 0, 1]))
- # env.step(np.array([0, 1, 0, 0, 0]))
- # env.step(np.array([np.random.uniform(low=-1., high=1.), np.random.uniform(low=-1., high=1.), 0.]))
- time.sleep(0.05)
- close(env)
-
-
-def sample_sawyer_pick_and_place():
- env = SawyerPickAndPlaceEnv()
- env.reset()
- for _ in range(50):
- env.render()
- env.step(env.action_space.sample())
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_pick_and_place_wsg():
- env = SawyerPickAndPlaceWsgEnv()
- env.reset()
- for _ in range(100):
- env.render()
- env.step(np.array([0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]))
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_push_and_reach_env():
- env = SawyerPushAndReachXYEnv()
- for i in range(100):
- if i % 100 == 0:
- env.reset()
- env.step([0, 1])
- env.render()
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_push_and_reach_two_pucks():
- env = SawyerPushAndReachXYZDoublePuckEnv()
- env.reset()
- for i in range(100):
- env.render()
- env.set_goal({"state_desired_goal": np.array([1, 1, 1, 1, 1, 1, 1])})
- env.step(env.action_space.sample())
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_push_multiobj():
- env = SawyerTwoObjectEnv()
- env.reset()
- for _ in range(50):
- env.render()
- env.step(env.action_space.sample())
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_push_nips():
- env = SawyerPushAndReachXYEasyEnv()
- for _ in range(100):
- env.render()
- env.step(env.action_space.sample())
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_reach():
- env = SawyerReachEnv()
- for i in range(100):
- if i % 100 == 0:
- env.reset()
- env.step(env.action_space.sample())
- env.render()
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_reach_push_pick_place():
- env = SawyerReachPushPickPlaceEnv()
- for i in range(100):
- if i % 100 == 0:
- env.reset()
- env.step(np.array([0, 1, 1]))
- env.render()
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_rope():
- env = SawyerRopeEnv()
- env.reset()
- for _ in range(50):
- env.render()
- env.step(env.action_space.sample())
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_shelf_place():
- env = SawyerShelfPlaceEnv()
- env.reset()
- for _ in range(100):
- env.render()
- env.step(env.action_space.sample())
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_stack():
- env = SawyerStackEnv()
- env.reset()
- for _ in range(50):
- env.render()
- env.step(env.action_space.sample())
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_stick_pull():
- env = SawyerStickPullEnv()
- env.reset()
- for _ in range(100):
- env.render()
- env.step(env.action_space.sample())
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_stick_push():
- env = SawyerStickPushEnv()
- env.reset()
- for _ in range(100):
- env.render()
- env.step(env.action_space.sample())
- if _ < 10:
- env.step(np.array([0, 0, -1, 0, 0]))
- elif _ < 20:
- env.step(np.array([0, 0, 0, 0, 1]))
- else:
- env.step(np.array([1, 0, 0, 0, 1]))
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_sweep():
- env = SawyerSweepEnv(fix_goal=True)
- for i in range(200):
- if i % 100 == 0:
- env.reset()
- env.step(env.action_space.sample())
- env.render()
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_sweep_into_goal():
- env = SawyerSweepIntoGoalEnv(fix_goal=True)
- for i in range(1000):
- if i % 100 == 0:
- env.reset()
- env.step(np.array([0, 1, 1]))
- env.render()
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_throw():
- env = SawyerThrowEnv()
- for i in range(1000):
- if i % 100 == 0:
- env.reset()
- env.step(np.array([0, 0, 0, 1]))
- env.render()
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_window_close():
- env = SawyerWindowCloseEnv()
- env.reset()
- for _ in range(100):
- env.render()
- env.step(np.array([1, 0, 0, 1]))
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-def sample_sawyer_window_open():
- env = SawyerWindowOpenEnv()
- env.reset()
- for _ in range(100):
- env.render()
- env.step(np.array([1, 0, 0, 1]))
- time.sleep(0.05)
- glfw.destroy_window(env.viewer.window)
-
-
-demos = {
- SawyerNutAssemblyEnv: sample_sawyer_assembly_peg,
- SawyerBinPickingEnv: sample_sawyer_bin_picking,
- SawyerBoxCloseEnv: sample_sawyer_box_close,
- SawyerBoxOpenEnv: sample_sawyer_box_open,
- SawyerButtonPressEnv: sample_sawyer_button_press_6d0f,
- SawyerButtonPressTopdownEnv: sample_sawyer_button_press_topdown_6d0f,
- SawyerDialTurnEnv: sample_sawyer_dial_turn,
- SawyerDoorEnv: sample_sawyer_door,
- SawyerDoorCloseEnv: sample_sawyer_door_close,
- SawyerDoorHookEnv: sample_sawyer_door_hook,
- SawyerDoorEnv: sample_sawyer_door,
- SawyerDrawerCloseEnv: sample_sawyer_drawer_close,
- SawyerDrawerOpenEnv: sample_sawyer_drawer_open,
- SawyerHammerEnv: sample_sawyer_hammer,
- SawyerHandInsertEnv: sample_sawyer_hand_insert,
- SawyerLaptopCloseEnv: sample_sawyer_laptop_close,
- SawyerLeverPullEnv: sample_sawyer_lever_pull,
- MultiSawyerEnv: sample_sawyer_multiple_objects,
- SawyerPegInsertionSideEnv: sample_sawyer_peg_insertion_side,
- SawyerPickAndPlaceEnv: sample_sawyer_pick_and_place,
- SawyerPickAndPlaceEnv: sample_sawyer_pick_and_place,
- SawyerPickAndPlaceWsgEnv: sample_sawyer_pick_and_place_wsg,
- SawyerPushAndReachXYEnv: sample_sawyer_push_and_reach_env,
- SawyerPushAndReachXYZDoublePuckEnv: sample_sawyer_push_and_reach_two_pucks,
- SawyerTwoObjectEnv: sample_sawyer_push_multiobj,
- SawyerTwoObjectEnv: sample_sawyer_push_multiobj,
- SawyerPushAndReachXYEasyEnv: sample_sawyer_push_nips,
- SawyerReachXYZEnv: sample_sawyer_reach,
- SawyerReachEnv: sample_sawyer_reach,
- SawyerReachPushPickPlaceEnv: sample_sawyer_reach_push_pick_place,
- SawyerRopeEnv: sample_sawyer_rope,
- SawyerShelfPlaceEnv: sample_sawyer_shelf_place,
- SawyerStackEnv: sample_sawyer_stack,
- SawyerStickPullEnv: sample_sawyer_stick_pull,
- SawyerStickPushEnv: sample_sawyer_stick_push,
- SawyerSweepEnv: sample_sawyer_sweep,
- SawyerSweepIntoGoalEnv: sample_sawyer_sweep_into_goal,
- SawyerThrowEnv: sample_sawyer_throw,
- SawyerWindowCloseEnv: sample_sawyer_window_close,
- SawyerWindowOpenEnv: sample_sawyer_window_open,
-}
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(
- description="Run sample test of one specific environment!"
- )
- parser.add_argument("--env", help="The environment name wanted to be test.")
- env_cls = globals()[parser.parse_args().env]
- demos[env_cls]()
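With `demo_sawyer.py` gone, ad-hoc environment demos can be reproduced against the surviving V2 registry. A sketch under stated assumptions, not a drop-in replacement: the registry import is the one used in `scripts/profile_memory_usage.py`, the dict key is assumed, the raw V2 envs may need a task set before `reset()` will run, and `render()` may need a render mode under the new MuJoCo bindings:

```python
#!/usr/bin/env python3
"""Minimal stand-in for the deleted per-environment demo functions (a sketch)."""
import time

from metaworld.envs.mujoco.env_dict import ALL_V2_ENVIRONMENTS


def sample_env(name: str, steps: int = 100) -> None:
    env = ALL_V2_ENVIRONMENTS[name]()  # direct construction, as in keyboard_control.py
    env.reset()
    for _ in range(steps):
        env.render()
        env.step(env.action_space.sample())
        time.sleep(0.05)


if __name__ == "__main__":
    sample_env("pick-place-v2")  # key name assumed; see env_dict for the full list
```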
diff --git a/scripts/keyboard_control.py b/scripts/keyboard_control.py
index 5a139680c..736168dc3 100644
--- a/scripts/keyboard_control.py
+++ b/scripts/keyboard_control.py
@@ -7,10 +7,10 @@
import sys
import numpy as np
-import pygame
-from pygame.locals import KEYDOWN, QUIT
+import pygame # type: ignore
+from pygame.locals import KEYDOWN, QUIT # type: ignore
-from metaworld.envs.mujoco.sawyer_xyz import SawyerPickPlaceEnvV2
+from metaworld.envs.mujoco.sawyer_xyz.v2 import SawyerPickPlaceEnvV2
pygame.init()
screen = pygame.display.set_mode((400, 300))
@@ -44,7 +44,7 @@
lock_action = False
random_action = False
obs = env.reset()
-action = np.zeros(4)
+action = np.zeros(4, dtype=np.float32)
while True:
done = False
if not lock_action:
@@ -65,13 +65,13 @@
action[3] = 1
elif new_action == "open":
action[3] = -1
- elif new_action is not None:
+ elif isinstance(new_action, np.ndarray):
action[:3] = new_action[:3]
else:
- action = np.zeros(3)
+ action = np.zeros(3, dtype=np.float32)
print(action)
else:
- action = env.action_space.sample()
+ action = np.array(env.action_space.sample(), dtype=np.float32)
ob, reward, done, infos = env.step(action)
# time.sleep(1)
if done:
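The `isinstance` guard added above is what convinces mypy the slice assignment is safe: `new_action` can be a string command, an ndarray delta, or `None`, and only the ndarray branch may be written into `action[:3]`. A condensed sketch of that narrowing, with illustrative names:

```python
from __future__ import annotations

import numpy as np
import numpy.typing as npt


def merge(new_action: str | npt.NDArray[np.float32] | None,
          action: npt.NDArray[np.float32]) -> None:
    if isinstance(new_action, np.ndarray):
        # mypy narrows new_action to an ndarray here; str and None fall through
        action[:3] = new_action[:3]
```

Note that the script still unpacks a four-tuple from `env.step(action)`; that line is untouched by this diff and will need revisiting against the `gymnasium>=1.0.0a1` pin introduced in `pyproject.toml` above.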
diff --git a/scripts/policy_testing.py b/scripts/policy_testing.py
index 333bf40b3..2426df06c 100644
--- a/scripts/policy_testing.py
+++ b/scripts/policy_testing.py
@@ -21,18 +21,12 @@
env.seed(seed)
env.action_space.seed(seed)
env.observation_space.seed(seed)
-obs = env.reset()
+obs, _ = env.reset()
p = policy()
count = 0
done = False
-states = []
-actions = []
-next_states = []
-rewards = []
-
-dones = []
info = {}
while count < 500 and not done:
diff --git a/scripts/profile_memory_usage.py b/scripts/profile_memory_usage.py
index 4a5da2009..690158268 100755
--- a/scripts/profile_memory_usage.py
+++ b/scripts/profile_memory_usage.py
@@ -2,7 +2,7 @@
"""Test script for profiling average memory footprint."""
import memory_profiler
-from metaworld.envs.mujoco.sawyer_xyz.env_lists import HARD_MODE_LIST
+from metaworld.envs.mujoco.env_dict import ALL_V2_ENVIRONMENTS
from tests.helpers import step_env
@@ -22,7 +22,7 @@ def build_and_step_all(classes):
def profile_hard_mode_indepedent():
profile = {}
- for env_cls in HARD_MODE_LIST:
+ for env_cls in ALL_V2_ENVIRONMENTS:
target = (build_and_step, [env_cls], {})
memory_usage = memory_profiler.memory_usage(target)
profile[env_cls] = max(memory_usage)
@@ -31,7 +31,7 @@ def profile_hard_mode_indepedent():
def profile_hard_mode_shared():
- target = (build_and_step_all, [HARD_MODE_LIST], {})
+ target = (build_and_step_all, [ALL_V2_ENVIRONMENTS], {})
usage = memory_profiler.memory_usage(target)
return max(usage)
@@ -48,17 +48,13 @@ def profile_hard_mode_shared():
print("| min | mean | max |")
print("|----------|----------|----------|")
print(
- "| {:.1f} MB | {:.1f} MB | {:.1f} MB |".format(
- min_independent, mean_independent, max_independent
- )
+ f"| {min_independent:.1f} MB | {mean_independent:.1f} MB | {max_independent:.1f} MB |"
)
print("\n")
print("--------- Shared memory footprint ---------")
max_usage = profile_hard_mode_shared()
- mean_shared = max_usage / len(HARD_MODE_LIST)
+ mean_shared = max_usage / len(ALL_V2_ENVIRONMENTS)
print(
- "Mean memory footprint (n = {}): {:.1f} MB".format(
- len(HARD_MODE_LIST), mean_shared
- )
+ f"Mean memory footprint (n = {len(ALL_V2_ENVIRONMENTS)}): {mean_shared:.1f} MB"
)
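`memory_profiler.memory_usage` accepts a `(callable, args, kwargs)` tuple, samples the process memory while the callable runs, and returns the samples in MiB, which is why the profiling code above takes `max()` of the result. A self-contained sketch of the same pattern:

```python
import memory_profiler


def build(n: int) -> list[int]:
    return list(range(n))


# Returns a list of sampled memory readings (MiB); the peak is the footprint.
samples = memory_profiler.memory_usage((build, [1_000_000], {}))
print(f"peak: {max(samples):.1f} MB")
```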
diff --git a/tests/metaworld/envs/mujoco/sawyer_xyz/test_obs_space_hand.py b/tests/metaworld/envs/mujoco/sawyer_xyz/test_obs_space_hand.py
index f015d143e..ecb2a1d09 100644
--- a/tests/metaworld/envs/mujoco/sawyer_xyz/test_obs_space_hand.py
+++ b/tests/metaworld/envs/mujoco/sawyer_xyz/test_obs_space_hand.py
@@ -2,7 +2,7 @@
import pytest
from metaworld.envs.mujoco.env_dict import ALL_V2_ENVIRONMENTS
-from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv
+from metaworld.envs.mujoco.sawyer_xyz import SawyerXYZEnv
from metaworld.policies.action import Action
from metaworld.policies.policy import Policy, move