Merge pull request #12 from opendilab/gl-dev
update version 0.3.0
RobinC94 authored Jan 6, 2022
2 parents 73f0009 + cc59cf6 commit 849bd5c
Showing 102 changed files with 2,712 additions and 1,728 deletions.
14 changes: 14 additions & 0 deletions CHANGELOG
@@ -1,3 +1,17 @@
## v0.3.0 (2022.1.7)
- Add MetaDrive support (env, entry, doc)
- Add simulator selection at import time
- Rename all common modules to drop 'carla'
- Add MetaDrive basic env training
- Add MetaDrive Macro env training and test
- Add registry for all envs
- Update DriveEnvWrapper
- Update logo
- Update doc; add MetaDrive guide and tutorial
- Reformat simple RL; all policies now have a common entry (train, eval, test)
- Fix LBC planner bug
- Delete config helper

## v0.2.2 (2021.12.3)
- Add fail count and retry for benchmark collector
- Add LBC Image training phase 0 & 1
17 changes: 11 additions & 6 deletions README.md
@@ -1,8 +1,8 @@
# DI-drive

<img src="./docs/figs/DI-drive.png" width="200" alt="icon"/>
<img src="./docs/figs/di-drive_logo.png" width="200" alt="icon"/>

Updated on 2021.12.3 DI-drive-v0.2.2 (beta)
Updated on 2022.1.7 DI-drive-v0.3.0 (beta)

DI-drive - Decision Intelligence Platform for Autonomous Driving simulation.

@@ -24,16 +24,18 @@ and most importantly, to **put these all together!**

**DI-drive** uses [DI-engine](https://github.com/opendilab/DI-engine), a Reinforcement Learning
platform to build most of the running modules and demos. **DI-drive** currently supports [Carla](http://carla.org),
an open-source autonomous driving simulator used to run the driving simulation.
an open-source autonomous driving simulator used to run the driving simulation, and [MetaDrive](https://decisionforce.github.io/metadrive/),
a simulator that provides diverse driving scenarios for generalizable Reinforcement Learning. Users can specify which simulator to run in the global config under `core`.

## Installation

**DI-drive** needs to run a [Carla](http://carla.org) server for simulation. Besides, the client needs to have the following modules installed:
**DI-drive** needs to have the following modules installed:

- Pytorch
- DI-engine
- Carla Python API

[MetaDrive](https://decisionforce.github.io/metadrive/) can be easily installed via `pip`.
If a [Carla](http://carla.org) server is used for simulation, users additionally need to install the Carla Python API.
Please refer to the [documentation](https://opendilab.github.io/DI-drive/) for details on installation and the user guide of **DI-drive**.
We provide IL and RL tutorials, as well as complete guidance for beginners to quickly run existing policies.

@@ -52,6 +54,7 @@ Please refer to [FAQ](https://opendilab.github.io/DI-drive/faq/index.html) for f
- BeV Speed RL
- [Implicit Affordance](https://arxiv.org/abs/1911.10868)
- [Latent DRL](https://arxiv.org/abs/2001.08726)
- MetaDrive Macro RL

## DI-drive Casezoo

@@ -71,10 +74,12 @@ DI-engine released under the Apache 2.0 license.

## Citation

```latex
@misc{didrive,
title={{DI-drive: OpenDILab} Decision Intelligence platform for Autonomous Driving simulation},
author={DI-drive Contributors},
publisher = {GitHub},
howpublished = {\url{`https://github.com/opendilab/DI-drive`}},
howpublished = {\url{https://github.com/opendilab/DI-drive}},
year={2021},
}
```
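
The simulator selection described in the README maps onto the `SIMULATORS` list added to `core/__init__.py` below. As a hypothetical sketch (rebinding the list at runtime is an assumption; editing it directly in the file works just as well), a MetaDrive-only run could look like:

```python
# Hypothetical sketch: restrict registration to MetaDrive by rebinding
# core.SIMULATORS before core.envs reads it at import time.
import core

core.SIMULATORS = ['metadrive']  # default is ['carla', 'metadrive']

import core.envs  # noqa: E402,F401  only MetaDrive envs get registered
```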
15 changes: 14 additions & 1 deletion core/__init__.py
@@ -1,6 +1,19 @@
__TITLE__ = "DI-drive"
__VERSION__ = "0.2.2"
__VERSION__ = "0.3.0"
__DESCRIPTION__ = "Decision AI Auto-Driving Platform"
__AUTHOR__ = "OpenDILab Contributors"
__AUTHOR_EMAIL__ = "[email protected]"
__version__ = __VERSION__

SIMULATORS = ['carla', 'metadrive']

# if 'carla' in SIMULATORS:
# try:
# import carla
# except:
# raise ImportError("Import carla failed! Please install carla Python API first.")
# if 'metadrive' in SIMULATORS:
# try:
# import metadrive
# except:
# raise ImportError("Import metadrive failed! Please install metadrive simulator first.")
2 changes: 1 addition & 1 deletion core/data/base_collector.py
@@ -3,7 +3,7 @@
from typing import Any, Dict
from easydict import EasyDict

from core.utils.others.config_helper import deep_merge_dicts
from ding.utils.default_helper import deep_merge_dicts
from ding.utils import EasyTimer


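The only change here swaps the local `config_helper` for DI-engine's `deep_merge_dicts`. A short usage sketch, assuming DI-engine's semantics of recursively merging the second dict into the first, with the second taking precedence:

```python
from ding.utils.default_helper import deep_merge_dicts

base = {'env': {'town': 'Town01', 'port': 9000}}
override = {'env': {'port': 9010}}
merged = deep_merge_dicts(base, override)
# merged -> {'env': {'town': 'Town01', 'port': 9010}}
```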
24 changes: 22 additions & 2 deletions core/data/lbc_dataset.py
@@ -60,6 +60,8 @@ def __init__(
self._lmdb_list = []
self._idx_list = []

n_episodes = 0

for item in epi_folder:
lmdb_file = lmdb.open(os.path.join(root_dir, item, 'measurements.lmdb')).begin(write=False)
max_len = int(lmdb_file.get('len'.encode())) - self._gap * self._n_step
@@ -68,12 +70,22 @@
]
png_files.sort()
for i in range(max_len):
if self._max_frames and len(self) >= self._max_frames:
break

png_file = png_files[i]
index = int(png_file.split('_')[1].split('.')[0])
self._img_list.append(os.path.join(root_dir, item, png_file))
self._idx_list.append(index)
self._lmdb_list.append(lmdb_file)

n_episodes += 1

if self._max_frames and len(self) >= self._max_frames:
break

print('%s: %d frames, %d episodes.' % (root_dir, len(self), n_episodes))

def __len__(self):
return len(self._img_list)

@@ -142,7 +154,7 @@ def __getitem__(self, index) -> Any:
locations.append([pixel_x, pixel_y])
orientations.append([ori_dx, ori_dy])

#birdview = self.bird_view_transform(birdview)
birdview = self.bird_view_transform(birdview)

# Create mask
output_size = self._crop_size // self._down_ratio
@@ -205,6 +217,8 @@ def __init__(
self._lmdb_list = []
self._idx_list = []

count = 0

for item in epi_folder:
lmdb_file = lmdb.open(os.path.join(root_dir, item, 'measurements.lmdb')).begin(write=False)
max_len = int(lmdb_file.get('len'.encode())) - self._gap * self._n_step
@@ -219,6 +233,10 @@
self._idx_list.append(index)
self._lmdb_list.append(lmdb_file)

count += max_len

print('Finished loading %s. Length: %d' % (root_dir, count))

def __len__(self):
return len(self._img_list)

@@ -298,7 +316,9 @@ def __getitem__(self, index) -> Any:
# import pdb; pdb.set_trace()
rgb_images = torch.stack([self.rgb_transform(img) for img in rgb_images])

# birdview = self.bird_view_transform(birdview)
birdview = self.bird_view_transform(birdview)

self._batch_read_number += 1

return {
'rgb': rgb_images,
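Below is a standalone sketch of the frame-cap pattern the dataset now uses: the inner loop stops appending once the budget is reached, and the outer loop stops opening further episodes. All names are illustrative, not the dataset's API:

```python
def collect_frames(episodes, max_frames=0):
    # max_frames == 0 disables the cap, mirroring a falsy _max_frames.
    frames, n_episodes = [], 0
    for episode in episodes:
        for frame in episode:
            if max_frames and len(frames) >= max_frames:
                break
            frames.append(frame)
        n_episodes += 1
        if max_frames and len(frames) >= max_frames:
            break
    print('%d frames, %d episodes.' % (len(frames), n_episodes))
    return frames
```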
33 changes: 29 additions & 4 deletions core/envs/__init__.py
@@ -2,7 +2,32 @@
Copyright 2021 OpenDILab. All Rights Reserved:
Description:
'''
from .base_carla_env import BaseCarlaEnv
from .carla_env_wrapper import CarlaEnvWrapper, BenchmarkEnvWrapper
from .simple_carla_env import SimpleCarlaEnv
from .scenario_carla_env import ScenarioCarlaEnv
from gym.envs.registration import register, registry
from core import SIMULATORS
from .base_drive_env import BaseDriveEnv
from .drive_env_wrapper import DriveEnvWrapper, BenchmarkEnvWrapper

envs = []
env_map = {}

if 'carla' in SIMULATORS:
from .simple_carla_env import SimpleCarlaEnv
from .scenario_carla_env import ScenarioCarlaEnv
env_map.update({
"SimpleCarla-v1": 'core.envs.simple_carla_env.SimpleCarlaEnv',
"ScenarioCarla-v1": 'core.envs.scenario_carla_env.ScenarioCarlaEnv'
})

if 'metadrive' in SIMULATORS:
from .md_macro_env import MetaDriveMacroEnv
env_map.update({
"Macro-v1": 'core.envs.md_macro_env:MetaDriveMacroEnv',
})

for k, v in env_map.items():
if k not in registry.env_specs:
envs.append(k)
register(id=k, entry_point=v)

if len(envs) > 0:
print("[ENV] Register environments: {}.".format(envs))
8 changes: 4 additions & 4 deletions core/envs/base_carla_env.py → core/envs/base_drive_env.py
@@ -10,16 +10,16 @@
import gym
from gym import utils

from core.utils.others.config_helper import deep_merge_dicts
from ding.utils.default_helper import deep_merge_dicts


class BaseCarlaEnv(gym.Env, utils.EzPickle):
class BaseDriveEnv(gym.Env, utils.EzPickle):
"""
Base class for environments. It inherits from `gym.Env` and uses the same interfaces.
All Carla Env classes are supposed to inherit from this class.
All Drive Env classes are supposed to inherit from this class.
Note:
To run Reinforcement Learning on DI-engine platform, the environment should be wrapped with `CarlaEnvWrapper`.
To run Reinforcement Learning on DI-engine platform, the environment should be wrapped with `DingEnvWrapper`.
:Arguments:
- cfg (Dict): Config Dict.
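A hypothetical subclass sketch of the renamed base class (the real abstract surface of `BaseDriveEnv` may require more overrides than shown here):

```python
from core.envs import BaseDriveEnv


class MyDriveEnv(BaseDriveEnv):
    """Toy example; all Drive Env classes derive from BaseDriveEnv."""

    def reset(self, *args, **kwargs):
        return {'speed': 0.0}

    def step(self, action):
        obs, reward, done, info = {'speed': 0.0}, 0.0, True, {}
        return obs, reward, done, info
```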
55 changes: 40 additions & 15 deletions core/envs/carla_env_wrapper.py → core/envs/drive_env_wrapper.py
@@ -7,28 +7,28 @@

from core.data.benchmark import ALL_SUITES
from core.eval.carla_benchmark_evaluator import get_suites_list, read_pose_txt, get_benchmark_dir
from .base_carla_env import BaseCarlaEnv
from core.utils.others.config_helper import deep_merge_dicts
from .base_drive_env import BaseDriveEnv
from ding.utils.default_helper import deep_merge_dicts
from ding.envs.env.base_env import BaseEnvTimestep, BaseEnvInfo
from ding.envs.common.env_element import EnvElementInfo
from ding.torch_utils.data_helper import to_ndarray


class CarlaEnvWrapper(gym.Wrapper):
class DriveEnvWrapper(gym.Wrapper):
"""
Environment wrapper to make ``gym.Env`` align with DI-engine definitions, so as to use DI-engine's utilities.
It changes the ``step``, ``reset`` and ``info`` methods of ``gym.Env``, while all other methods are passed through unchanged.
:Arguments:
- env (BaseCarlaEnv): The environment to be wrapped.
- env (BaseDriveEnv): The environment to be wrapped.
- cfg (Dict): Config dict.
:Interfaces: reset, step, info, render, seed, close
"""

config = dict()

def __init__(self, env: BaseCarlaEnv, cfg: Dict = None, **kwargs) -> None:
def __init__(self, env: BaseDriveEnv, cfg: Dict = None, **kwargs) -> None:
if cfg is None:
self._cfg = self.__class__.default_config()
elif 'cfg_type' not in cfg:
@@ -47,7 +47,9 @@ def reset(self, *args, **kwargs) -> Any:
Any: Observations from environment
"""
obs = self.env.reset(*args, **kwargs)
obs = to_ndarray(obs)
obs = to_ndarray(obs, dtype=np.float32)
if isinstance(obs, np.ndarray) and len(obs.shape) == 3:
obs = obs.transpose((2, 0, 1))
self._final_eval_reward = 0.0
return obs

@@ -68,12 +70,19 @@

obs, rew, done, info = self.env.step(action)
self._final_eval_reward += rew
obs = to_ndarray(obs)
obs = to_ndarray(obs, dtype=np.float32)
if isinstance(obs, np.ndarray) and len(obs.shape) == 3:
obs = obs.transpose((2, 0, 1))
rew = to_ndarray([rew], dtype=np.float32)
if done:
info['final_eval_reward'] = self._final_eval_reward
return BaseEnvTimestep(obs, rew, done, info)

def seed(self, seed: int, dynamic_seed: bool = True) -> None:
self._seed = seed
self._dynamic_seed = dynamic_seed
np.random.seed(self._seed)

def info(self) -> BaseEnvInfo:
"""
Interface of ``info`` method to suit DI-engine format env.
@@ -83,9 +92,25 @@ def info(self) -> BaseEnvInfo:
:Returns:
BaseEnvInfo: Env information instance defined in DI-engine.
"""
obs_space = self.env.observation_space
act_space = self.env.action_space
return BaseEnvInfo(agent_num=1, obs_space=obs_space, act_space=act_space, use_wrappers=None)
obs_space = EnvElementInfo(shape=self.env.observation_space, value={'min': 0., 'max': 1., 'dtype': np.float32})
act_space = EnvElementInfo(
shape=self.env.action_space,
value={
'min': np.float32("-inf"),
'max': np.float32("inf"),
'dtype': np.float32
},
)
rew_space = EnvElementInfo(
shape=1,
value={
'min': np.float32("-inf"),
'max': np.float32("inf")
},
)
return BaseEnvInfo(
agent_num=1, obs_space=obs_space, act_space=act_space, rew_space=rew_space, use_wrappers=None
)

def enable_save_replay(self, replay_path: Optional[str] = None) -> None:
if replay_path is None:
@@ -106,15 +131,15 @@ def render(self):
self.env.render()


class BenchmarkEnvWrapper(CarlaEnvWrapper):
class BenchmarkEnvWrapper(DriveEnvWrapper):
"""
Environment wrapper for Carla Benchmark suite evaluations. It wraps an environment with a benchmark
suite so that the env always runs with a benchmark suite setting. It has two modes for getting reset
params from a suite: 'random' picks a reset param at random; 'order' iterates over all reset params in
order.
:Arguments:
- env (BaseCarlaEnv): The environment to be wrapped.
- env (BaseDriveEnv): The environment to be wrapped.
- cfg (Dict): Config dict.
"""

@@ -124,7 +149,7 @@ class BenchmarkEnvWrapper(CarlaEnvWrapper):
mode='random',
)

def __init__(self, env: BaseCarlaEnv, cfg: Dict, **kwargs) -> None:
def __init__(self, env: BaseDriveEnv, cfg: Dict, **kwargs) -> None:
super().__init__(env, cfg=cfg, **kwargs)
suite = self._cfg.suite
benchmark_dir = self._cfg.benchmark_dir
@@ -208,9 +233,9 @@ def step(self, action: Dict) -> Any:


# TODO: complete scenario env wrapper
class ScenarioEnvWrapper(CarlaEnvWrapper):
class ScenarioEnvWrapper(DriveEnvWrapper):

config = dict()

def __init__(self, env: BaseCarlaEnv, cfg: Dict, **kwargs) -> None:
def __init__(self, env: BaseDriveEnv, cfg: Dict, **kwargs) -> None:
super().__init__(env, cfg=cfg, **kwargs)
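
A hedged end-to-end sketch of the renamed wrapper: observations come back as float32 ndarrays, HWC images are transposed to CHW, and `step` returns a DI-engine `BaseEnvTimestep` (the env id and action sampling here are assumptions):

```python
import gym
import core.envs  # noqa: F401  registers the envs
from core.envs import DriveEnvWrapper

env = DriveEnvWrapper(gym.make("Macro-v1"))
obs = env.reset()  # np.float32; image obs arrive as (C, H, W)
obs, rew, done, info = env.step(env.action_space.sample())
if done:
    print(info['final_eval_reward'])
```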
