From 695e18ef252087386db4cf292b58c7d0f56ceb49 Mon Sep 17 00:00:00 2001
From: Ephraim Rusu
Date: Wed, 7 Feb 2024 12:44:46 -0800
Subject: [PATCH] External wrappers have unwrapped property

---
 abmarl/external/gym_env_wrapper.py          | 20 +++++++++----------
 abmarl/external/open_spiel_env_wrapper.py   | 10 ++++++++++
 .../external/rllib_multiagentenv_wrapper.py | 10 ++++++++++
 tests/test_workflow.py                      |  2 ++
 4 files changed, 32 insertions(+), 10 deletions(-)

diff --git a/abmarl/external/gym_env_wrapper.py b/abmarl/external/gym_env_wrapper.py
index c44fd631..238335c4 100644
--- a/abmarl/external/gym_env_wrapper.py
+++ b/abmarl/external/gym_env_wrapper.py
@@ -34,6 +34,16 @@ def observation_space(self):
         """
         return self.agent.observation_space
 
+    @property
+    def unwrapped(self):
+        """
+        Fall through all the wrappers and obtain the original, completely unwrapped simulation.
+        """
+        try:
+            return self.sim.unwrapped
+        except AttributeError:
+            return self.sim
+
     def reset(self, **kwargs):
         """
         Return the observation from the single agent.
@@ -58,13 +68,3 @@ def render(self, **kwargs):
         Forward render calls to the composed simulation.
         """
         self.sim.render(**kwargs)
-
-    @property
-    def unwrapped(self):
-        """
-        Fall through all the wrappers and obtain the original, completely unwrapped simulation.
-        """
-        try:
-            return self.sim.unwrapped
-        except AttributeError:
-            return self.sim
diff --git a/abmarl/external/open_spiel_env_wrapper.py b/abmarl/external/open_spiel_env_wrapper.py
index 72a8b669..c3ab1c97 100644
--- a/abmarl/external/open_spiel_env_wrapper.py
+++ b/abmarl/external/open_spiel_env_wrapper.py
@@ -121,6 +121,16 @@ def current_player(self, value):
             "Current player must be an agent in the simulation."
         self._current_player = value
 
+    @property
+    def unwrapped(self):
+        """
+        Fall through all the wrappers and obtain the original, completely unwrapped simulation.
+        """
+        try:
+            return self.sim.unwrapped
+        except AttributeError:
+            return self.sim
+
     def reset(self, **kwargs):
         """
         Reset the simulation.
diff --git a/abmarl/external/rllib_multiagentenv_wrapper.py b/abmarl/external/rllib_multiagentenv_wrapper.py
index f150370a..96dba3cb 100644
--- a/abmarl/external/rllib_multiagentenv_wrapper.py
+++ b/abmarl/external/rllib_multiagentenv_wrapper.py
@@ -38,6 +38,16 @@ def __init__(self, sim):
         })
         self._spaces_in_preferred_format = True
 
+    @property
+    def unwrapped(self):
+        """
+        Fall through all the wrappers and obtain the original, completely unwrapped simulation.
+        """
+        try:
+            return self.sim.unwrapped
+        except AttributeError:
+            return self.sim
+
     def reset(self):
         """See SimulationManager."""
         return self.sim.reset()
diff --git a/tests/test_workflow.py b/tests/test_workflow.py
index 13d63c3d..3e1846c2 100644
--- a/tests/test_workflow.py
+++ b/tests/test_workflow.py
@@ -121,6 +121,8 @@ def tally_rewards(sim, trainer):
 
         trainer: Trainer that computes actions using the trained policies.
     """
+    from abmarl.managers import SimulationManager
+    assert isinstance(sim, SimulationManager), "sim must be a SimulationManager."
     # Run the simulation with actions chosen from the trained policies
     policy_agent_mapping = trainer.config['multiagent']['policy_mapping_fn']
     for episode in range(5):