Sensor test #527

Merged 33 commits on Oct 24, 2023
50 changes: 48 additions & 2 deletions .github/workflows/main.yml
@@ -178,6 +178,54 @@ jobs:
cd metadrive/
pytest --cov=./ --cov-config=.coveragerc --cov-report=xml -sv tests/test_export_record_scenario

test_sensor_pipeline:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Prepare OpenGL
run: |
sudo apt-get -y install xvfb
sudo /usr/bin/Xvfb :0 -screen 0 1280x1024x24 &
- name: Blackbox tests
run: |
pip install cython
pip install numpy
pip install -e .
python -m metadrive.pull_asset
pip install pytest
pip install pytest-cov
pip install ray
cd metadrive/
pytest --cov=./ --cov-config=.coveragerc --cov-report=xml -sv tests/test_sensors

test_examples:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.9
uses: actions/setup-python@v2
with:
python-version: 3.9
- name: Prepare OpenGL
run: |
sudo apt-get -y install xvfb
sudo /usr/bin/Xvfb :0 -screen 0 1280x1024x24 &
- name: Blackbox tests
run: |
pip install cython
pip install numpy
pip install -e .
python -m metadrive.pull_asset
pip install pytest
pip install pytest-cov
pip install ray
cd metadrive/
pytest --cov=./ --cov-config=.coveragerc --cov-report=xml -sv tests/test_examples

test_ipynb:
runs-on: ubuntu-latest
steps:
@@ -203,8 +251,6 @@ jobs:

test_ros:
runs-on: ubuntu-22.04
# container:
# image: ubuntu:jammy
steps:
- name: Set up ROS2 humble
uses: ros-tooling/[email protected]
10 changes: 7 additions & 3 deletions metadrive/component/sensors/__init__.py
@@ -1,3 +1,7 @@
class BaseSensor:
def perceive(self, *args, **kwargs):
raise NotImplementedError
# from metadrive.component.sensors.depth_camera import DepthCamera
# from metadrive.component.sensors.rgb_camera import RGBCamera
# from metadrive.component.sensors.semantic_camera import SemanticCamera
# from metadrive.component.sensors.mini_map import MiniMap
# from metadrive.component.sensors.lidar import Lidar
# from metadrive.component.sensors.distance_detector import DistanceDetector, SideDetector, LaneLineDetector
# from metadrive.component.sensors.dashboard import DashBoard
17 changes: 9 additions & 8 deletions metadrive/component/sensors/base_camera.py
@@ -1,6 +1,6 @@
import numpy as np
import cv2
from metadrive.component.sensors import BaseSensor
from metadrive.component.sensors.base_sensor import BaseSensor
from metadrive.utils.cuda import check_cudart_err

_cuda_enable = True
@@ -19,25 +19,26 @@

class BaseCamera(ImageBuffer, BaseSensor):
"""
This class wraps ImageBuffer and BaseSensor to implement the perceive() function for capturing images of the
virtual world. It also adds CUDA support, so rendered images can be kept on the GPU and converted to torch
tensors directly. The sensor is shared and can therefore be placed at any position in the world for any object's use.
To enable the image observation, set image_observation to True.
All objects share the same camera to boost efficiency and save memory.
Camera configuration is read from the global config automatically.
"""
# shape(dim_1, dim_2)
BUFFER_W = 84 # dim 1
BUFFER_H = 84 # dim 2
CAM_MASK = None
display_region_size = [1 / 3, 2 / 3, 0.8, 1.0]
attached_object = None

def __init__(self, engine, setup_pbr=False, need_cuda=False, frame_buffer_property=None):
num_channels = 3

def __init__(self, engine, need_cuda=False, frame_buffer_property=None):
self._enable_cuda = need_cuda
super(BaseCamera, self).__init__(
self.BUFFER_W,
self.BUFFER_H,
Vec3(0., 0.8, 1.5),
self.BKG_COLOR,
setup_pbr=setup_pbr,
engine=engine,
frame_buffer_property=frame_buffer_property
)
@@ -47,7 +48,7 @@ def __init__(self, engine, setup_pbr=False, need_cuda=False, frame_buffer_proper
if (width > 100 or height > 100) and not self.enable_cuda:
# A height or width that is too large will cause corruption on Mac.
self.logger.warning(
"You may using too large buffer! The height is {}, and width is {}. "
"You are using too large buffer! The height is {}, and width is {}. "
"It may lower the sample efficiency! Consider reducing buffer size or use cuda image by"
" set [image_on_cuda=True].".format(height, width)
)
@@ -110,7 +111,7 @@ def perceive(self, base_object, clip=True) -> np.ndarray:
self.track(base_object)
if self.enable_cuda:
assert self.cuda_rendered_result is not None
ret = self.cuda_rendered_result[..., :-1][..., ::-1][::-1]
ret = self.cuda_rendered_result[..., :-1][..., ::-1][::-1][..., :self.num_channels]
else:
ret = self.get_rgb_array_cpu()
if self.engine.global_config["rgb_to_grayscale"]:
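The docstring above describes the intended workflow: cameras are shared, configured from the global config, and activated by setting image_observation to True. Below is a minimal, hedged sketch of that workflow, not part of this PR; the exact config keys (sensors, image_observation, vehicle_config.image_source, image_on_cuda) and the observation layout are assumptions that may vary across MetaDrive versions.

from metadrive.envs.metadrive_env import MetaDriveEnv
from metadrive.component.sensors.rgb_camera import RGBCamera

# Sketch: request image observations from a shared 84x84 RGB camera.
env = MetaDriveEnv(dict(
    image_observation=True,                          # deliver camera frames in the observation
    sensors=dict(rgb_camera=(RGBCamera, 84, 84)),    # sensor class plus buffer width/height
    vehicle_config=dict(image_source="rgb_camera"),  # which shared sensor feeds the observation
    # image_on_cuda=True,                            # optional: keep frames on the GPU, see the warning above
))
obs, _ = env.reset()
print(obs["image"].shape)  # stacked frames rendered by the shared camera
env.close()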
15 changes: 15 additions & 0 deletions metadrive/component/sensors/base_sensor.py
@@ -0,0 +1,15 @@
class BaseSensor:
"""
This is the base class of all sensors.
"""
def perceive(self, *args, **kwargs):
"""
All sensors have to implement this API as the interface for accessing the sensor output.
Args:
*args: varies according to the sensor type
**kwargs: varies according to the sensor type

Returns: the sensor output. It could be a matrix such as an image, or another data structure.

"""
raise NotImplementedError
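For reference, any new sensor only needs to subclass BaseSensor and implement perceive(). The ConstantRangeSensor below is a made-up example used purely to illustrate the interface; it is not part of this PR.

import numpy as np

from metadrive.component.sensors.base_sensor import BaseSensor


class ConstantRangeSensor(BaseSensor):
    """Toy sensor that returns a fixed-length range reading (illustrative only)."""

    def __init__(self, num_beams=16):
        self.num_beams = num_beams

    def perceive(self, *args, **kwargs):
        # A real sensor would query the engine or physics world here.
        return np.ones(self.num_beams, dtype=np.float32)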
@@ -1,11 +1,14 @@
from panda3d.core import NodePath, PGTop, TextNode, CardMaker, Vec3

from metadrive.component.sensors import BaseSensor
from metadrive.component.sensors.base_sensor import BaseSensor
from metadrive.constants import CamMask
from metadrive.engine.core.image_buffer import ImageBuffer


class VehiclePanel(ImageBuffer, BaseSensor):
class DashBoard(ImageBuffer, BaseSensor):
"""
Dashboard showing the speed and the brake/throttle/steering status
"""
PARA_VIS_LENGTH = 12
PARA_VIS_HEIGHT = 1
MAX_SPEED = 120
@@ -14,7 +17,6 @@ class VehiclePanel(ImageBuffer, BaseSensor):
CAM_MASK = CamMask.PARA_VIS
GAP = 4.1
TASK_NAME = "update panel"
display_region_size = [2 / 3, 1, 0.8, 1.0]

def __init__(self, engine, *, cuda):
if engine.win is None:
@@ -74,15 +76,14 @@ def __init__(self, engine, *, cuda):

card.setPos(0.2 + self.PARA_VIS_LENGTH / 2, 0, 0.22)
self.para_vis_np[name] = card
super(VehiclePanel, self).__init__(
super(DashBoard, self).__init__(
self.BUFFER_W,
self.BUFFER_H,
Vec3(-0.9, -1.01, 0.78),
self.BKG_COLOR,
parent_node=self.aspect2d_np,
engine=engine
)
# self.add_display_region(self.display_region_size)
self._node_path_list.extend(tmp_node_path_list)

def update_vehicle_state(self, vehicle):
@@ -112,15 +113,15 @@ def update_vehicle_state(self, vehicle):

def remove_display_region(self):
self.buffer.set_active(False)
super(VehiclePanel, self).remove_display_region()
super(DashBoard, self).remove_display_region()

def add_display_region(self, display_region):
super(VehiclePanel, self).add_display_region(display_region)
super(DashBoard, self).add_display_region(display_region)
self.buffer.set_active(True)
self.origin.reparentTo(self.aspect2d_np)

def destroy(self):
super(VehiclePanel, self).destroy()
super(DashBoard, self).destroy()
for para in self.para_vis_np.values():
para.removeNode()
self.aspect2d_np.removeNode()
51 changes: 27 additions & 24 deletions metadrive/component/sensors/depth_camera.py
@@ -1,30 +1,27 @@
import cv2
from panda3d.core import Shader, RenderState, ShaderAttrib, GeoMipTerrain, PNMImage, Texture, LightAttrib, \
from panda3d.core import Shader, RenderState, ShaderAttrib, GeoMipTerrain, PNMImage, LightAttrib, \
TextureAttrib, ColorAttrib

from metadrive.component.sensors.base_camera import BaseCamera
from metadrive.constants import CamMask
from metadrive.constants import RENDER_MODE_NONE
from metadrive.engine.asset_loader import AssetLoader
from panda3d.core import FrameBufferProperties


class DepthCamera(BaseCamera):
# shape(dim_1, dim_2)
CAM_MASK = CamMask.DepthCam

GROUND_HEIGHT = -0.5
VIEW_GROUND = False
VIEW_GROUND = True
GROUND = None
GROUND_MODEL = None

num_channels = 1
shader_name = "depth_cam"

def __init__(self, width, height, engine, *, cuda=False):
self.BUFFER_W, self.BUFFER_H = width, height
self.VIEW_GROUND = True # default true
frame_buffer_property = FrameBufferProperties()
frame_buffer_property.set_rgba_bits(8, 8, 8, 0) # disable alpha for RGB camera
# TODO It can be made more efficient by only using one channel
super(DepthCamera, self).__init__(engine, False, cuda)
super(DepthCamera, self).__init__(engine, cuda)
cam = self.get_cam()
lens = self.get_lens()

@@ -40,21 +37,6 @@ def __init__(self, width, height, engine, *, cuda=False):
# vert_path = AssetLoader.file_path("shaders", "depth_cam_gles.vert.glsl")
# frag_path = AssetLoader.file_path("shaders", "depth_cam_gles.frag.glsl")
# else:
from metadrive.utils import is_mac
if is_mac():
vert_path = AssetLoader.file_path("shaders", "depth_cam_mac.vert.glsl")
frag_path = AssetLoader.file_path("shaders", "depth_cam_mac.frag.glsl")
else:
vert_path = AssetLoader.file_path("shaders", "depth_cam.vert.glsl")
frag_path = AssetLoader.file_path("shaders", "depth_cam.frag.glsl")
custom_shader = Shader.load(Shader.SL_GLSL, vertex=vert_path, fragment=frag_path)
cam.node().setInitialState(
RenderState.make(
LightAttrib.makeAllOff(), TextureAttrib.makeOff(), ColorAttrib.makeOff(),
ShaderAttrib.make(custom_shader, 1)
)
)

if self.VIEW_GROUND:
ground = PNMImage(513, 513, 4)
ground.fill(1., 1., 1.)
@@ -80,3 +62,24 @@ def track(self, base_object):
# self.GROUND_MODEL.setP(-base_object.origin.getR())
# self.GROUND_MODEL.setR(-base_object.origin.getR())
return super(DepthCamera, self).track(base_object)

def _setup_effect(self):
"""
Set up the camera effect that enables depth calculation.

Returns: None
"""
from metadrive.utils import is_mac
if is_mac():
vert_path = AssetLoader.file_path("shaders", "{}_mac.vert.glsl".format(self.shader_name))
frag_path = AssetLoader.file_path("shaders", "{}_mac.frag.glsl".format(self.shader_name))
else:
vert_path = AssetLoader.file_path("shaders", "{}.vert.glsl".format(self.shader_name))
frag_path = AssetLoader.file_path("shaders", "{}.frag.glsl".format(self.shader_name))
custom_shader = Shader.load(Shader.SL_GLSL, vertex=vert_path, fragment=frag_path)
self.get_cam().node().setInitialState(
RenderState.make(
LightAttrib.makeAllOff(), TextureAttrib.makeOff(), ColorAttrib.makeOff(),
ShaderAttrib.make(custom_shader, 1)
)
)
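Since _setup_effect now derives the GLSL paths from shader_name, a subclass can swap in its own shader pair just by overriding that attribute, which is how RGBDepthCamera below reuses the logic. A small sketch, assuming the named shader files exist under the asset directory; my_depth_cam.vert.glsl / my_depth_cam.frag.glsl and the *_mac variants are hypothetical names, not files shipped with MetaDrive.

from metadrive.component.sensors.depth_camera import DepthCamera


class MyDepthCamera(DepthCamera):
    # Hypothetical subclass: _setup_effect() will look for
    # shaders/my_depth_cam.vert.glsl and shaders/my_depth_cam.frag.glsl
    # (or the *_mac variants on macOS) via AssetLoader.
    shader_name = "my_depth_cam"
    num_channels = 1  # depth-style output keeps a single channel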
2 changes: 1 addition & 1 deletion metadrive/component/sensors/distance_detector.py
@@ -4,7 +4,7 @@
import numpy as np
from panda3d.core import NodePath, LVecBase4

from metadrive.component.sensors import BaseSensor
from metadrive.component.sensors.base_sensor import BaseSensor
from metadrive.constants import CamMask, CollisionGroup
from metadrive.engine.asset_loader import AssetLoader
from metadrive.engine.logger import get_logger
5 changes: 1 addition & 4 deletions metadrive/component/sensors/mini_map.py
@@ -7,13 +7,10 @@

class MiniMap(BaseCamera):
CAM_MASK = CamMask.MiniMap
display_region_size = [0., 1 / 3, 0.8, 1.0]

def __init__(self, width, height, z_pos, engine, *, cuda=False):
self.BUFFER_W, self.BUFFER_H, height = width, height, z_pos
frame_buffer_property = FrameBufferProperties()
frame_buffer_property.set_rgba_bits(8, 8, 8, 0) # disable alpha for RGB camera
super(MiniMap, self).__init__(engine=engine, need_cuda=cuda, frame_buffer_property=frame_buffer_property)
super(MiniMap, self).__init__(engine=engine, need_cuda=cuda)

cam = self.get_cam()
lens = self.get_lens()
44 changes: 36 additions & 8 deletions metadrive/component/sensors/rgb_camera.py
@@ -1,8 +1,9 @@
import panda3d.core as p3d
from direct.filter.FilterManager import FilterManager
from simplepbr import _load_shader_str

from metadrive.component.sensors.base_camera import BaseCamera
from metadrive.constants import CamMask
from metadrive.engine.engine_utils import engine_initialized, get_global_config
from direct.filter.CommonFilters import CommonFilters
from panda3d.core import FrameBufferProperties


class RGBCamera(BaseCamera):
@@ -14,13 +15,40 @@ class RGBCamera(BaseCamera):

def __init__(self, width, height, engine, *, cuda=False):
self.BUFFER_W, self.BUFFER_H = width, height
frame_buffer_property = FrameBufferProperties()
frame_buffer_property.set_rgba_bits(8, 8, 8, 0) # disable alpha for RGB camera
super(RGBCamera, self).__init__(engine, True, cuda, frame_buffer_property=frame_buffer_property)
super(RGBCamera, self).__init__(engine, cuda)
cam = self.get_cam()
lens = self.get_lens()
# cam.lookAt(0, 2.4, 1.3)
cam.lookAt(0, 10.4, 1.6)

lens.setFov(60)
# lens.setAspectRatio(2.0)

def _setup_effect(self):
"""
Set up the simplepbr tonemapping post-process effect.
Returns: None

"""
self.scene_tex = None
self.manager = FilterManager(self.buffer, self.cam)
fbprops = p3d.FrameBufferProperties()
fbprops.float_color = True
fbprops.set_rgba_bits(16, 16, 16, 16)
fbprops.set_depth_bits(24)
fbprops.set_multisamples(self.engine.pbrpipe.msaa_samples)
self.scene_tex = p3d.Texture()
self.scene_tex.set_format(p3d.Texture.F_rgba16)
self.scene_tex.set_component_type(p3d.Texture.T_float)
self.tonemap_quad = self.manager.render_scene_into(colortex=self.scene_tex, fbprops=fbprops)
#
defines = {}
#
post_vert_str = _load_shader_str('post.vert', defines)
post_frag_str = _load_shader_str('tonemap.frag', defines)
tonemap_shader = p3d.Shader.make(
p3d.Shader.SL_GLSL,
vertex=post_vert_str,
fragment=post_frag_str,
)
self.tonemap_quad.set_shader(tonemap_shader)
self.tonemap_quad.set_shader_input('tex', self.scene_tex)
self.tonemap_quad.set_shader_input('exposure', 1.0)
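The tonemap quad created above exposes an 'exposure' shader input, so the tonemapping strength can in principle be adjusted on a live camera. A hedged sketch follows; the engine.get_sensor accessor used to fetch the camera instance is an assumption and may differ by MetaDrive version.

def set_rgb_camera_exposure(engine, exposure=2.0):
    """Adjust the tonemapping exposure of a running RGBCamera (sketch)."""
    # Assumption: the engine exposes the shared sensors by name.
    rgb_cam = engine.get_sensor("rgb_camera")
    # tonemap_quad and the 'exposure' input are created in _setup_effect above.
    rgb_cam.tonemap_quad.set_shader_input("exposure", exposure)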
10 changes: 10 additions & 0 deletions metadrive/component/sensors/rgb_depth_camera.py
@@ -0,0 +1,10 @@
from metadrive.component.sensors.depth_camera import DepthCamera


class RGBDepthCamera(DepthCamera):
"""
(Deprecated) Same as RGBCamera, but the fourth channel stores depth information
"""
raise DeprecationWarning("This one won't work currently")
shader_name = "rgb_depth_cam"
VIEW_GROUND = False